diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/.gitignore b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/.gitignore new file mode 100644 index 00000000..1fe5870c --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/.gitignore @@ -0,0 +1,8 @@ +*.class +*.iml +*.log +.DS_Store +*.sln.iml +.idea/ + +tsdbctl-* diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/.travis.yml b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/.travis.yml new file mode 100644 index 00000000..44baf85d --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - "1.14.x" + +env: + - GO111MODULE=on TSDB_TEST_TABLE_PATH="TSDB_INTEGRATION_TESTS/$TRAVIS_BUILD_NUMBER" + +script: + - make test + - make lint + - V3IO_TSDB_CONFIG="$TRAVIS_BUILD_DIR/test/ci_v3io.yaml" make integration + - V3IO_TSDB_CONFIG="$TRAVIS_BUILD_DIR/test/ci_v3io_bench.yaml" TSDB_BENCH_INGEST_CONFIG="$TRAVIS_BUILD_DIR/test/benchmark/testdata/tsdb-bench-test-config-ci.yaml" make bench diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/Jenkinsfile b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/Jenkinsfile new file mode 100644 index 00000000..dec17c14 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/Jenkinsfile @@ -0,0 +1,529 @@ +label = "${UUID.randomUUID().toString()}" +BUILD_FOLDER = "/home/jenkins/go" +attempts=15 +git_project = "v3io-tsdb" +git_project_user = "v3io" +git_project_upstream_user = "v3io" +git_deploy_user = "iguazio-prod-git-user" +git_deploy_user_token = "iguazio-prod-git-user-token" +git_deploy_user_private_key = "iguazio-prod-git-user-private-key" + + +def build_nuclio(V3IO_TSDB_VERSION, internal_status="stable") { + withCredentials([ + usernamePassword(credentialsId: git_deploy_user, passwordVariable: 'GIT_PASSWORD', usernameVariable: 'GIT_USERNAME'), + string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') + ]) { + def git_project = 'tsdb-nuclio' + stage('prepare sources') { + container('jnlp') { + if (!fileExists("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}")) { + sh("cd ${BUILD_FOLDER}; git clone https://${GIT_TOKEN}@github.com/${git_project_user}/${git_project}.git src/github.com/${git_project_upstream_user}/${git_project}") + } + if ("${internal_status}" == "unstable") { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh("git stash") + sh("git checkout development") + } + } else { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh("git stash") + sh("git checkout master") + } + } + } + parallel( + 'update tsdb in ingest': { + container('jnlp') { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh """ + rm -rf functions/ingest/vendor/github.com/${git_project_upstream_user}/v3io-tsdb + git clone https://${GIT_TOKEN}@github.com/${git_project_user}/v3io-tsdb.git functions/ingest/vendor/github.com/${git_project_upstream_user}/v3io-tsdb + cd functions/ingest/vendor/github.com/${git_project_upstream_user}/v3io-tsdb + git checkout ${V3IO_TSDB_VERSION} + """ + } + } + container('golang') { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}/functions/ingest/vendor/github.com/${git_project_upstream_user}/v3io-tsdb") { + sh """ + GO111MODULE=on go mod vendor + rm -rf .git vendor/github.com/nuclio vendor/github.com/${git_project_upstream_user}/frames/vendor/golang.org/x/net vendor/golang.org/x/net + """ 
+ sh("chown 1000:1000 ./ -R") + } + } + }, + 'update tsdb in query': { + container('jnlp') { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh """ + rm -rf functions/query/vendor/github.com/${git_project_upstream_user}/v3io-tsdb functions/query/vendor/github.com/${git_project_upstream_user}/v3io-go + git clone https://${GIT_TOKEN}@github.com/${git_project_user}/v3io-tsdb.git functions/query/vendor/github.com/${git_project_upstream_user}/v3io-tsdb + cd functions/query/vendor/github.com/${git_project_upstream_user}/v3io-tsdb + git checkout ${V3IO_TSDB_VERSION} + """ + } + } + container('golang') { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}/functions/query/vendor/github.com/${git_project_upstream_user}/v3io-tsdb") { + sh """ + GO111MODULE=on go mod vendor + rm -rf .git vendor/github.com/nuclio vendor/github.com/${git_project_upstream_user}/frames/vendor/golang.org/x/net vendor/golang.org/x/net + mv vendor/github.com/v3io/v3io-go ${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}/functions/query/vendor/github.com/${git_project_upstream_user}/v3io-go + """ + sh("chown 1000:1000 ./ -R") + } + } + } + ) + } + + stage('git push') { + container('jnlp') { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh """ + git config --global user.email '${GIT_USERNAME}@iguazio.com' + git config --global user.name '${GIT_USERNAME}' + git remote rm origin + git remote add origin https://${GIT_USERNAME}:${GIT_TOKEN}@github.com/${git_project_user}/${git_project}.git + git add functions/ingest/vendor/github.com functions/query/vendor/github.com; + """ + try { + common.shellc("git commit -m 'Updated TSDB to ${V3IO_TSDB_VERSION}'") + } catch (err) { + echo "Can not commit" + } + try { + if ( "${internal_status}" == "unstable" ) { + common.shellc("git push origin development") + } else { + common.shellc("git push origin master") + } + } catch (err) { + echo "Can not push code" + } + } + } + container('golang') { + sh("rm -rf ${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") + } + } + } +} + +def build_prometheus(V3IO_TSDB_VERSION, FRAMES_VERSION, internal_status="stable") { + withCredentials([ + usernamePassword(credentialsId: git_deploy_user, passwordVariable: 'GIT_PASSWORD', usernameVariable: 'GIT_USERNAME'), + string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') + ]) { + def git_project = 'prometheus' + + stage('prepare sources') { + container('jnlp') { + if (!fileExists("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}")) { + sh("cd ${BUILD_FOLDER}; git clone https://${GIT_TOKEN}@github.com/${git_project_user}/${git_project}.git src/github.com/${git_project_upstream_user}/${git_project}") + } + if ("${internal_status}" == "unstable") { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh("git stash") + sh("git checkout development") + } + } else { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh("git stash") + sh("git checkout master") + } + } + } + container('golang') { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + if("${git_project_user}" != "${git_project_upstream_user}") { + sh("GO111MODULE=on go mod edit -replace github.com/${git_project_upstream_user}/v3io-tsdb=github.com/${git_project_user}/v3io-tsdb@${V3IO_TSDB_VERSION}") + sh("GO111MODULE=on go mod edit -replace 
github.com/${git_project_upstream_user}/frames=github.com/${git_project_user}/frames@${FRAMES_VERSION}") + sh("GO111MODULE=on go get") + } else { + sh("GO111MODULE=on go mod edit -replace github.com/${git_project_upstream_user}/v3io-tsdb=github.com/${git_project_upstream_user}/v3io-tsdb@${V3IO_TSDB_VERSION}") + sh("GO111MODULE=on go mod edit -replace github.com/${git_project_upstream_user}/frames=github.com/${git_project_upstream_user}/frames@${FRAMES_VERSION}") + } + sh("GO111MODULE=on go mod vendor") + sh("chown 1000:1000 ./ -R") + } + } + } + + stage('git push') { + container('jnlp') { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh """ + git config --global user.email '${GIT_USERNAME}@iguazio.com' + git config --global user.name '${GIT_USERNAME}' + git remote rm origin + git remote add origin https://${GIT_USERNAME}:${GIT_TOKEN}@github.com/${git_project_user}/${git_project}.git + git add go.mod go.sum vendor/modules.txt vendor; + """ + try { + common.shellc("git commit -m 'Updated TSDB to ${V3IO_TSDB_VERSION}'") + } catch (err) { + echo "Can not commit" + } + try { + if ( "${internal_status}" == "unstable" ) { + common.shellc("git push origin development") + } else { + common.shellc("git push origin master") + } + } catch (err) { + echo "Can not push code" + } + } + } + container('golang') { + sh("rm -rf ${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") + } + } + } +} + +def build_frames(V3IO_TSDB_VERSION, internal_status="stable") { + withCredentials([ + usernamePassword(credentialsId: git_deploy_user, passwordVariable: 'GIT_PASSWORD', usernameVariable: 'GIT_USERNAME'), + string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') + ]) { + def git_project = 'frames' + + stage('prepare sources') { + container('jnlp') { + if (!fileExists("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}")) { + sh("cd ${BUILD_FOLDER}; git clone https://${GIT_TOKEN}@github.com/${git_project_user}/${git_project}.git src/github.com/${git_project_upstream_user}/${git_project}") + } + if ("${internal_status}" == "unstable") { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh("git stash") + sh("git checkout development") + } + } else { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh("git stash") + sh("git checkout master") + } + } + } + container('golang') { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + if("${git_project_user}" != "${git_project_upstream_user}") { + sh("GO111MODULE=on go mod edit -replace github.com/${git_project_upstream_user}/v3io-tsdb=github.com/${git_project_user}/v3io-tsdb@${V3IO_TSDB_VERSION}") + sh("GO111MODULE=on go get") + } else { + sh("GO111MODULE=on go get github.com/${git_project_user}/v3io-tsdb@${V3IO_TSDB_VERSION}") + } + sh("chown 1000:1000 ./ -R") + } + } + } + + stage('git push') { + container('jnlp') { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh """ + git config --global user.email '${GIT_USERNAME}@iguazio.com' + git config --global user.name '${GIT_USERNAME}' + git remote rm origin + git remote add origin https://${GIT_USERNAME}:${GIT_TOKEN}@github.com/${git_project_user}/${git_project}.git + git add go.mod go.sum + """ + try { + common.shellc("git commit -m 'Updated TSDB to ${V3IO_TSDB_VERSION}'") + } catch (err) { + echo "Can not commit" + } + try { + if ( "${internal_status}" == "unstable" ) { + 
common.shellc("git push origin development") + } else { + common.shellc("git push origin master") + } + } catch (err) { + echo "Can not push code" + } + } + } + container('golang') { + sh("rm -rf ${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") + } + } + } +} + +def wait_for_release(V3IO_TSDB_VERSION, next_versions, tasks_list) { + withCredentials([ + string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') + ]) { + if (V3IO_TSDB_VERSION != "unstable") { + stage('waiting for prereleases moved to releases') { + container('jnlp') { + i = 0 + def success_count = 0 + + while (true) { + sleep(60) + + def done_count = 0 + + echo "attempt #${i}" + tasks_list.each { project, status -> + if (status == null) { + def RELEASE_SUCCESS = sh( + script: "curl --silent -H \"Content-Type: application/json\" -H \"Authorization: token ${GIT_TOKEN}\" -X GET https://api.github.com/repos/${git_project_user}/${project}/releases/tags/${next_versions[project]} | python -c 'import json,sys;obj=json.load(sys.stdin);print obj[\"prerelease\"]' | if grep -iq false; then echo 'release'; else echo 'prerelease'; fi", + returnStdout: true + ).trim() + + echo "${project} is ${RELEASE_SUCCESS}" + if (RELEASE_SUCCESS != null && RELEASE_SUCCESS == 'release') { + tasks_list.putAt(project, true) + done_count++ + success_count++ + } else { + def TAG_SHA = sh( + script: "curl --silent -H \"Content-Type: application/json\" -H \"Authorization: token ${GIT_TOKEN}\" -X GET https://api.github.com/repos/${git_project_user}/${project}/git/refs/tags/${next_versions[project]} | python -c 'import json,sys;obj=json.load(sys.stdin);print obj[\"object\"][\"sha\"]'", + returnStdout: true + ).trim() + + if (TAG_SHA != null) { + def COMMIT_STATUS = sh( + script: "curl --silent -H \"Content-Type: application/json\" -H \"Authorization: token ${GIT_TOKEN}\" -X GET https://api.github.com/repos/${git_project_user}/${project}/commits/${TAG_SHA}/statuses | python -c 'import json,sys;obj=json.load(sys.stdin);print obj[0][\"state\"]' | if grep -iq error; then echo 'error'; else echo 'ok'; fi", + returnStdout: true + ).trim() + if (COMMIT_STATUS != null && COMMIT_STATUS == 'error') { + tasks_list.putAt(project, false) + done_count++ + } + } + } + } else { + done_count++ + } + } + if (success_count >= tasks_list.size()) { + echo "all releases have been successfully completed" + break + } + + if (done_count >= tasks_list.size() || i++ > attempts) { + def failed = [] + def notcompleted = [] + def error_string = '' + tasks_list.each { project, status -> + if (status == null) { + notcompleted += project + } else if (status == false) { + failed += project + } + } + if (failed.size()) { + error_string += failed.join(',') + ' have been failed :_(. ' + } + if (notcompleted.size()) { + error_string += notcompleted.join(',') + ' have been not completed :(. 
' + } + error(error_string) + break + } + } + } + } + } else { + stage('info') { + echo("Unstable tsdb doesn't trigger tsdb-nuclio and prometheus") + } + } + } +} + +podTemplate(label: "${git_project}-${label}", inheritFrom: "jnlp-docker-golang") { + def MAIN_TAG_VERSION + def FRAMES_NEXT_VERSION + def next_versions = ['prometheus':null, 'tsdb-nuclio':null, 'frames':null] + + pipelinex = library(identifier: 'pipelinex@development', retriever: modernSCM( + [$class: 'GitSCMSource', + credentialsId: git_deploy_user_private_key, + remote: "git@github.com:iguazio/pipelinex.git"])).com.iguazio.pipelinex + + common.notify_slack { + node("${git_project}-${label}") { + withCredentials([ + string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') + ]) { + stage('get tag data') { + container('jnlp') { + MAIN_TAG_VERSION = github.get_tag_version(TAG_NAME) + + echo "$MAIN_TAG_VERSION" + } + } + + if (github.check_tag_expiration(git_project, git_project_user, MAIN_TAG_VERSION, GIT_TOKEN)) { + parallel( + 'tsdb-nuclio': { + podTemplate(label: "v3io-tsdb-nuclio-${label}", inheritFrom: "jnlp-docker-golang") { + node("v3io-tsdb-nuclio-${label}") { + withCredentials([ + string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') + ]) { + def NEXT_VERSION + + if (MAIN_TAG_VERSION != "unstable") { + stage('get previous release version') { + container('jnlp') { + NEXT_VERSION = github.get_next_short_tag_version("tsdb-nuclio", git_project_user, GIT_TOKEN) + next_versions.putAt("tsdb-nuclio", NEXT_VERSION) + } + } + + build_nuclio(MAIN_TAG_VERSION, "unstable") + build_nuclio(MAIN_TAG_VERSION) + + stage('create tsdb-nuclio prerelease') { + container('jnlp') { + // development has been triggered when committed to it in github-webhook nuclio function + // echo "Triggered tsdb-nuclio development will be builded with last tsdb stable version" + // github.delete_release("tsdb-nuclio", git_project_user, "unstable", GIT_TOKEN) + // github.create_prerelease("tsdb-nuclio", git_project_user, "unstable", GIT_TOKEN, "development") + + echo "Trigger tsdb-nuclio ${NEXT_VERSION} with tsdb ${MAIN_TAG_VERSION}" + github.create_prerelease("tsdb-nuclio", git_project_user, NEXT_VERSION, GIT_TOKEN) + } + } + } else { + stage('info') { + echo("Unstable tsdb doesn't trigger tsdb-nuclio") + } + } + } + } + } + }, + 'frames': { + podTemplate(label: "v3io-frames-${label}", inheritFrom: "jnlp-docker-golang") { + node("v3io-frames-${label}") { + withCredentials([ + string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') + ]) { + def NEXT_VERSION + + if (MAIN_TAG_VERSION != "unstable") { + stage('get previous release version') { + container('jnlp') { + NEXT_VERSION = github.get_next_short_tag_version("frames", git_project_user, GIT_TOKEN) + FRAMES_NEXT_VERSION = NEXT_VERSION + next_versions.putAt("frames", NEXT_VERSION) + } + } + + build_frames(MAIN_TAG_VERSION, "unstable") + build_frames(MAIN_TAG_VERSION) + + stage('create frames prerelease') { + container('jnlp') { + // development has been triggered when committed to it in github-webhook nuclio function + // echo "Triggered frames development will be builded with last tsdb stable version" + // github.delete_release("frames", git_project_user, "unstable", GIT_TOKEN) + // github.create_prerelease("frames", git_project_user, "unstable", GIT_TOKEN, "development") + + echo "Trigger frames ${NEXT_VERSION} with tsdb ${MAIN_TAG_VERSION}" + github.create_prerelease("frames", git_project_user, NEXT_VERSION, GIT_TOKEN) + } + } + } else { + stage('info') { + 
echo("Unstable tsdb doesn't trigger frames") + } + } + } + } + } + } + ) + } + } + } + + node("${git_project}-${label}") { + wait_for_release(MAIN_TAG_VERSION, next_versions, ['tsdb-nuclio': null, 'frames': null]) + } + + // prometheus moved last cos need frames version to build + podTemplate(label: "v3io-tsdb-prometheus-${label}", inheritFrom: "jnlp-docker-golang") { + node("v3io-tsdb-prometheus-${label}") { + withCredentials([ + string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') + ]) { + def TAG_VERSION + def NEXT_VERSION + + if (MAIN_TAG_VERSION != "unstable") { + stage('get current version') { + container('jnlp') { + sh """ + cd ${BUILD_FOLDER} + git clone https://${GIT_TOKEN}@github.com/${git_project_user}/prometheus.git src/github.com/prometheus/prometheus + """ + + TAG_VERSION = sh( + script: "cat ${BUILD_FOLDER}/src/github.com/prometheus/prometheus/VERSION", + returnStdout: true + ).trim() + } + } + + if (TAG_VERSION) { + stage('get previous release version') { + container('jnlp') { + NEXT_VERSION = github.get_next_short_tag_version("prometheus", git_project_user, GIT_TOKEN) + echo "$NEXT_VERSION" + next_versions.putAt('prometheus', NEXT_VERSION) + } + } + + build_prometheus(MAIN_TAG_VERSION, FRAMES_NEXT_VERSION, "unstable") + build_prometheus(MAIN_TAG_VERSION, FRAMES_NEXT_VERSION) + + stage('create prometheus prerelease') { + container('jnlp') { + // development has been triggered when committed to it in github-webhook nuclio function + // echo "Triggered prometheus development will be builded with last tsdb stable version" + // github.delete_release("prometheus", git_project_user, "unstable", GIT_TOKEN) + // github.create_prerelease("prometheus", git_project_user, "unstable", GIT_TOKEN, "development") + + echo "Trigger prometheus ${NEXT_VERSION} with tsdb ${MAIN_TAG_VERSION}" + github.create_prerelease("prometheus", git_project_user, NEXT_VERSION, GIT_TOKEN) + } + } + } + } else { + stage('info') { + echo("Unstable tsdb doesn't trigger prometheus") + } + } + } + } + } + + node("${git_project}-${label}") { + wait_for_release(MAIN_TAG_VERSION, next_versions, ['prometheus': null]) + } + + node("${git_project}-${label}") { + withCredentials([ + string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') + ]) { + stage('update release status') { + container('jnlp') { + github.update_release_status(git_project, git_project_user, "${MAIN_TAG_VERSION}", GIT_TOKEN) + } + } + } + } + } +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/LICENSE b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/Makefile b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/Makefile new file mode 100644 index 00000000..ebf9b9c1 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/Makefile @@ -0,0 +1,106 @@ +GIT_COMMIT_HASH := $(shell git rev-parse HEAD) +GIT_BRANCH=$(shell git rev-parse --abbrev-ref HEAD) +ifeq ($(GIT_BRANCH),) + GIT_BRANCH="N/A" +endif + +ifneq ($(TSDB_LABEL),) + GIT_REVISION := $(TSDB_LABEL) +else + GIT_REVISION := $(shell git describe --always) +endif + +GOOS ?= $(shell go env GOOS) +GOARCH ?= $(shell go env GOARCH) +GOPATH ?= $(shell go env GOPATH) + +TSDBCTL_BIN_NAME := tsdbctl-$(GIT_REVISION)-$(GOOS)-$(GOARCH) + +# Use RFC3339 (ISO8601) date format +BUILD_TIME := $(shell date -u +"%Y-%m-%dT%H:%M:%SZ") + +# Use fully qualified package name +CONFIG_PKG=github.com/v3io/v3io-tsdb/pkg/config + +# Use Go linker to set the build metadata +BUILD_OPTS := -ldflags " \ + -X $(CONFIG_PKG).buildTime=$(BUILD_TIME) \ + -X $(CONFIG_PKG).osys=$(GOOS) \ + -X $(CONFIG_PKG).architecture=$(GOARCH) \ + -X $(CONFIG_PKG).version=$(GIT_REVISION) \ + -X $(CONFIG_PKG).commitHash=$(GIT_COMMIT_HASH) \ + -X $(CONFIG_PKG).branch=$(GIT_BRANCH)" \ + -v -o "$(GOPATH)/bin/$(TSDBCTL_BIN_NAME)" + +TSDB_BUILD_COMMAND ?= GO111MODULE="on" CGO_ENABLED=0 go build $(BUILD_OPTS) ./cmd/tsdbctl + +.PHONY: fmt +fmt: + gofmt -l -s -w . + +.PHONY: get +get: + GO111MODULE="on" go mod tidy + +.PHONY: test +test: + go test -v -race -tags unit -count 1 ./... + +.PHONY: integration +integration: + go test -v -race -tags integration -p 1 -count 1 ./... # p=1 to force Go to run pkg tests serially. + +.PHONY: bench +bench: + go test -run=XXX -bench='^BenchmarkIngest$$' -benchtime 10s -timeout 5m ./test/benchmark/... + +.PHONY: build +build: + docker run \ + --volume $(shell pwd):/go/src/github.com/v3io/v3io-tsdb \ + --volume $(shell pwd):/go/bin \ + --workdir /go/src/github.com/v3io/v3io-tsdb \ + --env GOOS=$(GOOS) \ + --env GOARCH=$(GOARCH) \ + golang:1.12 \ + make bin + +.PHONY: bin +bin: + ${TSDB_BUILD_COMMAND} + +PHONY: gofmt +gofmt: +ifeq ($(shell gofmt -l .),) + # gofmt OK +else + $(error Please run `go fmt ./...` to format the code) +endif + +.PHONY: impi +impi: + @echo Installing impi... + GO111MODULE=off go get -u github.com/pavius/impi/cmd/impi + @echo Verifying imports... + $(GOPATH)/bin/impi \ + --local github.com/iguazio/provazio \ + --skip pkg/controller/apis \ + --skip pkg/controller/client \ + --ignore-generated \ + --scheme stdLocalThirdParty \ + ./... + +$(GOPATH)/bin/golangci-lint: + @echo Installing golangci-lint... + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s v1.10.2 + cp ./bin/golangci-lint $(GOPATH)/bin/ + +.PHONY: lint +lint: gofmt impi $(GOPATH)/bin/golangci-lint + @echo Linting... + @$(GOPATH)/bin/golangci-lint run \ + --disable-all --enable=deadcode --enable=goconst --enable=golint --enable=ineffassign \ + --enable=interfacer --enable=unconvert --enable=varcheck --enable=errcheck --enable=gofmt --enable=misspell \ + --enable=staticcheck --enable=gosimple --enable=govet --enable=goconst \ + cmd/... pkg/... internal/... 
+	@echo done linting
diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/README.md b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/README.md new file mode 100644 index 00000000..3d77474a --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/README.md @@ -0,0 +1,294 @@
+[![Travis Build Status](https://travis-ci.org/v3io/v3io-tsdb.svg?branch=master)](https://travis-ci.org/v3io/v3io-tsdb)
+[![GH Build Status](https://github.com/v3io/v3io-tsdb/workflows/CI/badge.svg)](https://github.com/v3io/v3io-tsdb/actions)
+
+# V3IO-TSDB
+Iguazio API lib for time-series DB access and Prometheus TSDB storage driver.
+
+> Note: This project is still under development; it requires the latest 1.7 release of iguazio DB (with Blob functions).
+
+## Overview
+Iguazio provides a real-time, flexible document database engine which accelerates popular BigData and open-source
+frameworks such as Spark and Presto, and also provides AWS-compatible data APIs (DynamoDB, Kinesis, S3).
+
+The Iguazio DB engine runs at the speed of in-memory databases but uses lower-cost, higher-density (NVMe) Flash; it has
+a unique low-level design with highly parallel processing and OS bypass which treats Flash as async memory pages.
+
+The Iguazio DB low-level APIs (v3io) have rich API semantics and multiple indexing types; these allow running multiple
+workloads and processing engines on exactly the same data, and reading/writing the data consistently across different tools.
+
+This project uses v3io semantics (row & column layouts, arrays, random & sequential indexes, etc.) to provide an extremely
+fast and scalable time-series database engine which can be accessed simultaneously by multiple engines and APIs, such as:
+- [Prometheus](https://prometheus.io/) Time Series DB (for metrics scraping & queries)
+- [nuclio](https://github.com/nuclio/nuclio) serverless functions (for real-time ingestion, stream processing or queries)
+- Iguazio DynamoDB API (with extensions)
+- Apache Presto & Spark (future item, for SQL & AI)
+- Built-in CLI (tsdbctl) for DB creation, ingestion, and queries
+
+[nuclio](https://github.com/nuclio/nuclio) supports HTTP and a large variety of streaming/triggering options (Kafka, Kinesis,
+Azure Event Hub, RabbitMQ, NATS, Iguazio streams, MQTT, cron tasks). It provides automatic deployment and auto-scaling,
+enabling ingestion from a variety of sources at virtually unlimited scale. nuclio functions can be customized to pre-process
+incoming data, e.g. to examine metric data, alert, or convert formats.
+
+ +![architecture](timeseries.png) +
+
+## Architecture
+The solution stores the raw data in highly compressed column chunks (using a Gorilla/XOR compression variation), with one
+chunk for every n hours (1hr default). Queries only retrieve and decompress the specific columns based on the
+requested time range.
+
+Users can define pre-aggregates (count, avg, sum, min, max, stddev, stdvar, last, rate) which use v3io update expressions and store
+data consistently in arrays per user-defined intervals (RollupMin) and/or dimensions (labels).
+
+![data layout](dataorg.png)
+
+High-resolution queries detect the pre-aggregates automatically and selectively access the array ranges
+(skipping chunk retrieval, decompression, and aggregation), which significantly accelerates searches and provides real-time
+responses. An extension supports overlapping aggregates (retrieve the last 1hr, 6hr, 12hr, and 24hr stats in a single request);
+this is currently not possible via the standard Prometheus TSDB API.
+
+The data can be partitioned into multiple tables (e.g. one per week) or use a cyclic table (which goes back to the first chunk after
+it reaches the end); multiple tables are stored in a hierarchy under the specified path.
+
+Metric names and labels are stored in search-optimized keys and string attributes. The Iguazio DB engine can run full
+dimension scans (searches) at a rate of millions of metrics per second, or use selective range-based queries to access
+a specific metric family.
+
+The use of v3io random-access keys (hash based) allows real-time sample data ingestion/retrieval and stream processing.
+
+To maintain high performance over low-speed connections we implement automatic IO throttling: if the link is slow, multiple
+samples are pushed in a single operation, and users can configure the maximum allowed batch (trading efficiency for
+consistency). IO is done using multiple parallel connections/workers, enabling maximum throughput regardless of the
+link latency.
+
+## How To Use
+
+The code is separated into a Prometheus-compliant adapter in [/promtsdb](promtsdb) and a more generic/advanced adapter in
+[/pkg/tsdb](pkg/tsdb); use the latter for custom functions and code. See a full usage example in
+[v3iotsdb_test.go](/pkg/tsdb/v3iotsdb_test.go); both have similar semantics.
+
+For Prometheus you need to use the fork found at `https://github.com/v3io/prometheus`, which already loads this
+library; you need to place a `v3io-tsdb-config.yaml` file with the relevant configuration in the same folder as the Prometheus
+executable (see the configuration details below).
+
+A developer using this library should first create a TSDB; this can be done using the CLI or an API call (`CreateTSDB`)
+which builds the TSDB metadata in the DB. To use the DB you should create an adapter using the method `NewV3ioAdapter()`;
+with the adapter you can create an `Appender` for adding samples or a `Querier` for querying the database and retrieving
+a set of metrics or aggregates. See the following sections for details.
+
+A user can run the CLI to add (append) to or query the DB. To use the CLI, build the code under [tsdbctl](cmd/tsdbctl);
+it has built-in help. See the following add/query examples:
+
+```
+    # create a DB with an expected ingestion rate of one sample per second and some aggregates (at 30 min interval)
+    # and cross-label aggregates for "host"
+    tsdbctl create -t <table-path> --ingestion-rate 1/s -a count,sum,max -i 30m -l label1
+
+    # display DB info with metric names (types)
+    tsdbctl info -t <table-path> -n
+
+    # append a sample (73.2) to the specified metric type (cpu) + labels at the current time
+    tsdbctl add -t <table-path> cpu os=win,node=xyz123 -d 73.2
+
+    # display all the CPU metrics for win servers from the last hour, in CSV format
+    tsdbctl query -t <table-path>
cpu -f "os=='win'" -l 1h -o csv + +``` + +For use with nuclio function you can see function example under [\nuclio](examples/nuclio) + +## API Walkthrough + +### Creating and Configuring a TSDB Adapter + +The first step is to create a TSDB, this is done only once per TSDB and generates the required metadata and configuration +such as partitioning strategy, retention, aggregates, etc. this can be done via the CLI or a function call. + +```go + // Load v3io connection/path details (see YAML below) + v3iocfg, err := config.GetOrLoadFromFile("v3io-tsdb-config.yaml") + if err != nil { + // TODO: handle error + } + + // Specify the default DB configuration (can be modified per partition) + samplesIngestionRate = "1/s" + aggregationGranularity = "1h" + aggregatesList = "scount,avg,min,max" + crossLabel = "label1,label2;label3" + schema, err := schema.NewSchema(v3iocfg, samplesIngestionRate, aggregationGranularity, aggregatesList, crossLabel) + if err != nil { + // TODO: handle error + } + + return tsdb.CreateTSDB(v3iocfg, schema) +``` + +> If you plan on using pre-aggregation to speed aggregate queries you should specify the `Rollups` (function list) and +`RollupMin` (bucket time in minutes) parameters, the supported aggregation functions are: count, sum, avg, min, max, +stddev, stdvar. + +In order to use the TSDB we need to create an adapter, the `NewV3ioAdapter` function accepts 3 +parameters: the configuration structure, v3io data container object and logger object. The last 2 are optional, in case +you already have container and logger (when using nuclio data bindings). + +Configuration is specified in a YAML or JSON format, and can be read from a file using `config.GetOrLoadFromFile(path string)` +or can be loaded from a local buffer using `config.GetOrLoadFromData(data []byte)`. +You can see details on the configuration options in the V3IO TSDB [**config.go**](pkg/config/config.go) source file. +A template configuration file is found at **examples/v3io-tsdb-config.yaml.template**. +You can use it as a reference for creating your own TSDB configuration file. +For example: + +```yaml +webApiEndpoint: "192.168.1.100:8081" +container: "tsdb" +username: "johnd" +password: "OpenSesame" +``` + +Following is an example of code for creating an adapter: + +```go + // create configuration object from file + cfg, err := config.GetOrLoadFromFile("v3io-tsdb-config.yaml") + if err != nil { + // TODO: handle error + } + + // create and start a new TSDB adapter + adapter, err := tsdb.NewV3ioAdapter(cfg, nil, nil) + if err != nil { + // TODO: handle error + } +``` + +### Creating and using an Appender (ingest metrics) + +The `Appender` interface is used to ingest metrics data, there are two functions for it: `Add` and `AddFast` which can be +after we used Add (using the refID returned by Add) to reduce some lookup/hash overhead. + +Example: + +```go + // create an Appender interface + appender, err := adapter.Appender() + if err != nil { + panic(err) + } + + // create metrics labels, `__name__` label specify the metric type (e.g. cpu, temperature, ..) 
+    // the other labels can be used in searches (filtering or grouping) or aggregations
+    // use utils.LabelsFromStrings(s ...string) for string list input or utils.LabelsFromMap(m map[string]string) for map input
+    lset := utils.LabelsFromStrings("__name__","http_req", "method", "post")
+
+    // Add a sample with the current time (in milliseconds) and the value of 7.9
+    ref, err := appender.Add(lset, time.Now().Unix() * 1000, 7.9)
+    if err != nil {
+        panic(err)
+    }
+
+    // Add a second sample using AddFast and the refID returned by Add
+    err = appender.AddFast(nil, ref, time.Now().Unix() * 1000 + 1000, 8.3)
+    if err != nil {
+        panic(err)
+    }
+```
+
+### Creating and using a Querier (read metrics and aggregates)
+
+The `Querier` interface is used to query the database and return one or more metrics. We first need to create a `Querier`;
+once we have one, we can use `Select()`, which returns a list of series (as an iterator object).
+
+Every returned series has two interfaces: `Labels()`, which returns the series or aggregate labels, and `Iterator()`,
+which returns an iterator over the series or aggregate values.
+
+The `Select()` call accepts a `SelectParams` parameter which has the following properties:
+* From (int64) - a timestamp in milliseconds specifying the start time of the query
+* To (int64) - a timestamp in milliseconds specifying the end time of the query
+* Name (string) - optional, comma-separated metric types (e.g. cpu, memory, ..); specifying it accelerates performance (uses range queries)
+* Step (int64) - optional, the step interval in milliseconds used for the aggregation functions or for downsampling raw data
+* Functions (string) - optional, a comma-separated list of aggregation functions e.g. `"count,sum,avg,stddev"`
+* Filter (string) - optional, a V3IO GetItems filter expression for selecting the desired metrics e.g. `_name=='http_req'`
+* GroupBy (string) - optional, a comma-separated list of labels to group the results by e.g. `"method"`
+* RequestedColumns ([]RequestedColumn) - optional, as an alternative to `Name` & `Functions` a user can pass a list of `RequestedColumn` objects that specify which metrics and aggregates to query.
+  Using this API it is possible to query several metrics in the same query.
+
+Using `Functions` and `Step` is optional; use them only when you are interested in pre-aggregation and the step is much larger (>>) than
+the sampling interval (and preferably equal to or greater than the partition RollupMin interval).
+There are two types of aggregates:
+* aggregates over time - aggregates the data into buckets over a period of time. This will result in a series for every unique label set per aggregate.
+* aggregates across series - aggregates the data for all the different label sets into one series per aggregate. Add an `_all` suffix to the aggregate name to use this kind of aggregation.
+
+In both cases, the `Aggregate` label will be added to that series with the function name.
+However, a user can use an aggregate over time **or** an aggregate across series, but not both in the same query.
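+
+For example, the following minimal sketch requests cross-series aggregation by adding the `_all` suffix to each function name. It assumes a querier `qry` and the `minTime`/`maxTime` range variables used in the examples below; the metric name and filter are placeholders for illustration:
+
+```go
+    // one output series per aggregate, aggregated across all label sets of "http_req"
+    params := &pquerier.SelectParams{Name: "http_req",
+        Filter: "method=='post'",
+        From: minTime,
+        To: maxTime,
+        Step: 1000*3600,
+        Functions: "count_all,avg_all"}
+    set, err := qry.Select(params)
+```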
+ +creating a querier: + +```go + qry, err := adapter.QuerierV2() + if err != nil { + panic(err) + } +``` + +Simple select example (no aggregates): +```go + params := &pquerier.SelectParams{Name: "http_req", + Filter: "method=='post'", + From: minTime, + To: maxTime} + set, err := qry.Select(params) +``` + +Select using aggregates: + +```go + params := &pquerier.SelectParams{Name: "http_req", + Filter: "method=='post'", + From: minTime, + To: maxTime, + Step: 1000*3600, + Functions: "count,avg,sum,max"} + set, err := qry.Select(params) +``` + +Select using RequestedColumns: + +```go + wantedColumns: []pquerier.RequestedColumn{{Metric: "http_req", Function: "avg"}, + {Metric: "http_req", Function: "count"}, + {Metric: "http_req", Function: "max"}, + {Metric: "tcp_req", Function: "avg"}} + params := &pquerier.SelectParams{RequestedColumns: wantedColumns + Filter: "method=='post'", + From: minTime, + To: maxTime, + Step: 1000*3600} + set, err := qry.Select(params) +``` + +Once we obtain a set using one of the methods above we can iterate over the set and the individual series in the following way: + +```go + for set.Next() { + if set.Err() != nil { + panic(set.Err()) + } + + series := set.At() + fmt.Println("\nLables:", series.Labels()) + iter := series.Iterator() + for iter.Next() { + if iter.Err() != nil { + panic(iter.Err()) + } + + t, v := iter.At() + fmt.Printf("t=%d,v=%.2f ", t, v) + } + fmt.Println() + } +``` diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/cmd/tsdbctl/tsdbctl.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/cmd/tsdbctl/tsdbctl.go new file mode 100644 index 00000000..2b62ab35 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/cmd/tsdbctl/tsdbctl.go @@ -0,0 +1,27 @@ +package main + +import ( + "os" + + "github.com/v3io/v3io-tsdb/pkg/tsdbctl" +) + +func main() { + if err := Run(); err != nil { + os.Exit(1) + } + os.Exit(0) +} + +func Run() error { + rootCmd := tsdbctl.NewRootCommandeer() + defer tearDown(rootCmd) + return rootCmd.Execute() +} + +func tearDown(cmd *tsdbctl.RootCommandeer) { + if cmd.Reporter != nil { // could be nil if has failed on initialisation + // nolint: errcheck + cmd.Reporter.Stop() + } +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/dataorg.png b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/dataorg.png new file mode 100644 index 00000000..b34a5532 Binary files /dev/null and b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/dataorg.png differ diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/examples/nuclio/ingest/ingest_example.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/examples/nuclio/ingest/ingest_example.go new file mode 100644 index 00000000..fb9bb1e3 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/examples/nuclio/ingest/ingest_example.go @@ -0,0 +1,197 @@ +package main + +import ( + "encoding/json" + "os" + "sort" + "strconv" + "strings" + "sync" + + "github.com/nuclio/nuclio-sdk-go" + "github.com/pkg/errors" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +/* +Example event: +{ + "metric": "cpu", + "labels": { + "dc": "7", + "hostname": "mybesthost" + }, + "samples": [ + { + "t": "1532595945142", + "v": { + "N": 95.2 + } + }, + { + "t": "1532595948517", + "v": { + "n": 86.8 + } + } + ] +} +*/ + +type value struct { + N float64 `json:"n,omitempty"` +} + +type sample struct { + Time string `json:"t"` + Value value `json:"v"` +} + +type request struct { + 
Metric string `json:"metric"` + Labels map[string]string `json:"labels,omitempty"` + Samples []sample `json:"samples"` +} + +var tsdbAppender tsdb.Appender +var tsdbAppenderMtx sync.Mutex + +func Handler(context *nuclio.Context, event nuclio.Event) (interface{}, error) { + var request request + + // parse body + if err := json.Unmarshal(event.GetBody(), &request); err != nil { + return "", nuclio.WrapErrBadRequest(err) + } + + if strings.TrimSpace(request.Metric) == "" { + return nil, nuclio.WrapErrBadRequest(errors.New(`request is missing the mandatory 'metric' field`)) + } + + // convert the map[string]string -> []Labels + labels := getLabelsFromRequest(request.Metric, request.Labels) + + var ref uint64 + // iterate over request samples + for _, sample := range request.Samples { + + // if time is not specified assume "now" + if sample.Time == "" { + sample.Time = "now" + } + // convert time string to time int, string can be: now, now-2h, int (unix milisec time), or RFC3339 date string + sampleTime, err := utils.Str2unixTime(sample.Time) + if err != nil { + return "", errors.Wrap(err, "Failed to parse time: "+sample.Time) + } + // append sample to metric + if ref == 0 { + ref, err = tsdbAppender.Add(labels, sampleTime, sample.Value.N) + } else { + err = tsdbAppender.AddFast(labels, ref, sampleTime, sample.Value.N) + } + if err != nil { + return "", errors.Wrap(err, "Failed to add sample") + } + } + + return "", nil +} + +// InitContext runs only once when the function runtime starts +func InitContext(context *nuclio.Context) error { + var err error + + // get configuration from env + tsdbTablePath := os.Getenv("INGEST_V3IO_TSDB_PATH") + if tsdbTablePath == "" { + return errors.New("INGEST_V3IO_TSDB_PATH must be set") + } + + context.Logger.InfoWith("Initializing", "tsdbTablePath", tsdbTablePath) + + // create TSDB appender + err = createTSDBAppender(context, tsdbTablePath) + if err != nil { + return err + } + + return nil +} + +// convert map[string]string -> utils.Labels +func getLabelsFromRequest(metricName string, labelsFromRequest map[string]string) utils.Labels { + + // adding 1 for metric name + labels := make(utils.Labels, 0, len(labelsFromRequest)+1) + + // add the metric name + labels = append(labels, utils.Label{ + Name: "__name__", + Value: metricName, + }) + + for labelKey, labelValue := range labelsFromRequest { + labels = append(labels, utils.Label{ + Name: labelKey, + Value: labelValue, + }) + } + + sort.Sort(labels) + + return labels +} + +func createTSDBAppender(context *nuclio.Context, path string) error { + context.Logger.InfoWith("Creating TSDB appender", "path", path) + + defer tsdbAppenderMtx.Unlock() + tsdbAppenderMtx.Lock() + + if tsdbAppender == nil { + v3ioConfig, err := config.GetOrLoadFromStruct(&config.V3ioConfig{ + TablePath: path, + }) + if err != nil { + return err + } + v3ioUrl := os.Getenv("V3IO_URL") + numWorkersStr := os.Getenv("V3IO_NUM_WORKERS") + var numWorkers int + if len(numWorkersStr) > 0 { + numWorkers, err = strconv.Atoi(numWorkersStr) + if err != nil { + return err + } + } else { + numWorkers = 8 + } + username := os.Getenv("V3IO_USERNAME") + if username == "" { + username = "iguazio" + } + password := os.Getenv("V3IO_PASSWORD") + containerName := os.Getenv("V3IO_CONTAINER") + if containerName == "" { + containerName = "bigdata" + } + container, err := tsdb.NewContainer(v3ioUrl, numWorkers, "", username, password, containerName, context.Logger) + if err != nil { + return err + } + // create adapter once for all contexts + adapter, err := 
tsdb.NewV3ioAdapter(v3ioConfig, container, context.Logger) + if err != nil { + return err + } + tsdbAppender, err = adapter.Appender() + if err != nil { + return err + } + } + + return nil +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/examples/nuclio/query/query_example.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/examples/nuclio/query/query_example.go new file mode 100644 index 00000000..abccd980 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/examples/nuclio/query/query_example.go @@ -0,0 +1,151 @@ +package main + +import ( + "bytes" + "encoding/json" + "os" + "strconv" + "strings" + "sync" + + "github.com/nuclio/nuclio-sdk-go" + "github.com/pkg/errors" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/formatter" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +/* +Example request: +{ + "metric": "cpu", + "step": "1m", + "start_time": "1532095945142", + "end_time": "1642995948517" +} +*/ + +type request struct { + Metric string `json:"metric"` + Aggregators []string `json:"aggregators"` + FilterExpression string `json:"filter_expression"` + Step string `json:"step"` + StartTime string `json:"start_time"` + EndTime string `json:"end_time"` + Last string `json:"last"` +} + +var tsdbQuerier *pquerier.V3ioQuerier +var tsdbQuerierMtx sync.Mutex + +func Handler(context *nuclio.Context, event nuclio.Event) (interface{}, error) { + request := request{} + + // try to unmarshal the request. return bad request if failed + if err := json.Unmarshal(event.GetBody(), &request); err != nil { + return nil, nuclio.WrapErrBadRequest(err) + } + + context.Logger.DebugWith("Got query request", "request", request) + + // convert string times (unix or RFC3339 or relative like now-2h) to unix milisec times + from, to, step, err := utils.GetTimeFromRange(request.StartTime, request.EndTime, request.Last, request.Step) + if err != nil { + return nil, nuclio.WrapErrBadRequest(errors.Wrap(err, "Error parsing query time range")) + } + + params := &pquerier.SelectParams{Name: request.Metric, + Functions: strings.Join(request.Aggregators, ","), + Step: step, + Filter: request.FilterExpression, + From: from, + To: to} + // Select query to get back a series set iterator + seriesSet, err := tsdbQuerier.Select(params) + if err != nil { + return nil, errors.Wrap(err, "Failed to execute query select") + } + + // convert SeriesSet to JSON (Grafana simpleJson format) + jsonFormatter, err := formatter.NewFormatter("json", nil) + if err != nil { + return nil, errors.Wrap(err, "failed to start json formatter") + } + + var buffer bytes.Buffer + err = jsonFormatter.Write(&buffer, seriesSet) + + return buffer.String(), err +} + +// InitContext runs only once when the function runtime starts +func InitContext(context *nuclio.Context) error { + + // get configuration from env + tsdbTablePath := os.Getenv("QUERY_V3IO_TSDB_PATH") + if tsdbTablePath == "" { + return errors.New("QUERY_V3IO_TSDB_PATH must be set") + } + + context.Logger.InfoWith("Initializing", "tsdbTablePath", tsdbTablePath) + + // create v3io adapter + err := createV3ioQuerier(context, tsdbTablePath) + if err != nil { + return errors.Wrap(err, "Failed to initialize querier") + } + return nil +} + +func createV3ioQuerier(context *nuclio.Context, path string) error { + context.Logger.InfoWith("Creating v3io adapter", "path", path) + + defer tsdbQuerierMtx.Unlock() + tsdbQuerierMtx.Lock() + + if tsdbQuerier == nil { + 
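// only the first caller creates the querier; later InitContext calls reuse the cached instance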
v3ioConfig, err := config.GetOrLoadFromStruct(&config.V3ioConfig{ + TablePath: path, + }) + if err != nil { + return err + } + v3ioUrl := os.Getenv("V3IO_URL") + numWorkersStr := os.Getenv("V3IO_NUM_WORKERS") + var numWorkers int + if len(numWorkersStr) > 0 { + numWorkers, err = strconv.Atoi(numWorkersStr) + if err != nil { + return err + } + } else { + numWorkers = 8 + } + username := os.Getenv("V3IO_USERNAME") + if username == "" { + username = "iguazio" + } + password := os.Getenv("V3IO_PASSWORD") + containerName := os.Getenv("V3IO_CONTAINER") + if containerName == "" { + containerName = "bigdata" + } + container, err := tsdb.NewContainer(v3ioUrl, numWorkers, "", username, password, containerName, context.Logger) + if err != nil { + return err + } + // create adapter once for all contexts + adapter, err := tsdb.NewV3ioAdapter(v3ioConfig, container, context.Logger) + if err != nil { + return err + } + // Create TSDB Querier + tsdbQuerier, err = adapter.QuerierV2() + if err != nil { + return errors.Wrap(err, "Failed to initialize querier") + } + } + return nil +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/examples/v3io-tsdb-config.yaml.template b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/examples/v3io-tsdb-config.yaml.template new file mode 100644 index 00000000..e5db2b3b --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/examples/v3io-tsdb-config.yaml.template @@ -0,0 +1,24 @@ +# File: v3io-tsdb-config.yaml +# Description: Template of a V3IO TSDB Configuration File + +# TODO: In your configuration file, delete the configuration keys that you +# don't need and replace the "<...>" placeholders. + +# Endpoint of an Iguazio Data Science Platform web-gateway (web-API) service, +# consisting of an IP address or resolvable host domain name, and a port number +# (currently, always port 8081) +# Example: "192.168.1.100:8081" +webApiEndpoint: ":8081" + +# Name of an Iguazio Data Science Platform container for storing the TSDB table +# Example: "bigdata" +container: "" + +# Log level +# Valid values: "debug" | "info" | "warn" | "error" +logLevel: "warn" + +# Authentication credentials for the web-API service +username: "" +password: "" + diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.mod b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.mod new file mode 100644 index 00000000..3f5c1b4a --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.mod @@ -0,0 +1,27 @@ +module github.com/v3io/v3io-tsdb + +go 1.14 + +require ( + github.com/cespare/xxhash v1.1.0 + github.com/ghodss/yaml v1.0.0 + github.com/imdario/mergo v0.3.7 + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/nuclio/logger v0.0.1 + github.com/nuclio/nuclio-sdk-go v0.0.0-20190205170814-3b507fbd0324 + github.com/nuclio/zap v0.0.2 + github.com/pkg/errors v0.8.1 + github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a + github.com/spf13/cobra v0.0.3 + github.com/stretchr/testify v1.4.0 + github.com/v3io/frames v0.7.10 + github.com/v3io/v3io-go v0.1.5-0.20200416113214-f1b82b9a8e82 + github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2 +) + +replace ( + github.com/v3io/v3io-tsdb => ./ + github.com/xwb1989/sqlparser => github.com/v3io/sqlparser v0.0.0-20190306105200-4d7273501871 + labix.org/v2/mgo => github.com/go-mgo/mgo v0.0.0-20180705113738-7446a0344b7872c067b3d6e1b7642571eafbae17 + launchpad.net/gocheck => github.com/go-check/check v0.0.0-20180628173108-788fd78401277ebd861206a03c884797c6ec5541 +) diff --git 
a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.sum b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.sum new file mode 100644 index 00000000..6f9239cc --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.sum @@ -0,0 +1,122 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE= +github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.4.0 h1:8nsMz3tWa9SWWPL60G1V6CUsf4lLjWLTNEtibhe8gh8= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e h1:+lIPJOWl+jSiJOc70QXJ07+2eg2Jy2EC7Mi11BWujeM= +github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= 
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/nuclio/errors v0.0.1 h1:JoADBDnhRKjW05Npu5CLS27Peo7gx+QZcNrLwINV6UY= +github.com/nuclio/errors v0.0.1/go.mod h1:it2rUqDarIL8PasLYZo0Q1Ebsx4NRPM+OyYYakgNyrQ= +github.com/nuclio/logger v0.0.0-20190303161055-fc1e4b16d127/go.mod h1:ttazNAqTxKjQ7XrGDZxecumGa9KCIuJh88gzFY1mRXo= +github.com/nuclio/logger v0.0.1 h1:e+vT/Ug65RC+u0QX2J+lq3P57ZBwJ1ZA6Q2LCEcViwE= +github.com/nuclio/logger v0.0.1/go.mod h1:ttazNAqTxKjQ7XrGDZxecumGa9KCIuJh88gzFY1mRXo= +github.com/nuclio/nuclio-sdk-go v0.0.0-20190205170814-3b507fbd0324 h1:wSCJEH8mUQ3VTyUukbYdxmi0UMmB14Lu1GOlNOs0dWY= +github.com/nuclio/nuclio-sdk-go v0.0.0-20190205170814-3b507fbd0324/go.mod h1:NqMgotiF6Y0Ho4+i5AvJhH3FRKAyL4IMaMv/eoUOkKQ= +github.com/nuclio/zap v0.0.2 h1:rY5PkMOl8CTkqRqIPuxziBiKK6Mq/8oEurfgRnNtqf0= +github.com/nuclio/zap v0.0.2/go.mod h1:SUxPsgePvlyjx6c5MtGdB50pf0IQThtlyLwISLboeuc= +github.com/pavius/impi v0.0.3/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= +github.com/pavius/zap v1.4.2-0.20180228181622-8d52692529b8 h1:WqLgmr/wj9TO5Sc6oYPQRAJBxuHE0NTeuVeFnT+FZVo= +github.com/pavius/zap v1.4.2-0.20180228181622-8d52692529b8/go.mod h1:6FWOCx06uh50GClv8S2cfk3asqTJs3qq3ZNRtLZE77I= +github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= +github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rs/xid v1.1.0/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 
h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/tinylib/msgp v1.1.1 h1:TnCZ3FIuKeaIy+F45+Cnp+caqdXGy4z74HvwXN+570Y= +github.com/tinylib/msgp v1.1.1/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/v3io/frames v0.7.10 h1:u5hZNOgrtuuZDqOXHgvwWNnNvGCOkByP+7c9peDZY2w= +github.com/v3io/frames v0.7.10/go.mod h1:33CcutEG8loyOg7NWpOLujqg0EN7Ofjojk7Uh5uqFHQ= +github.com/v3io/sqlparser v0.0.0-20190306105200-4d7273501871 h1:myF4tU/HdFWU1UzMdf16cHRbownzsyvL7VKIHqkrSvo= +github.com/v3io/sqlparser v0.0.0-20190306105200-4d7273501871/go.mod h1:QD2Bo64oyTWzeV8RFehXS0hZEDFgOK99/h2a6ErRu6E= +github.com/v3io/v3io-go v0.1.5-0.20200416113214-f1b82b9a8e82 h1:4LEQnRvqUtAk++AOKlrIUa13KJmmc7i4dy+gFej4vQk= +github.com/v3io/v3io-go v0.1.5-0.20200416113214-f1b82b9a8e82/go.mod h1:D0W1tjsVgcp4xk3ZI2fjKTKaOpYJLewN1BPN0x2osO4= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.2.0 h1:dzZJf2IuMiclVjdw0kkT+f9u4YdrapbNyGAN47E/qnk= +github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181026194446-8b5d7a19e2d9 h1:26lptpu+T60F849wXfTQMz9ecFf6nTQM0J1JjLSga5U= +google.golang.org/genproto v0.0.0-20181026194446-8b5d7a19e2d9/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.17.0 h1:TRJYBgMclJvGYn2rIMjj+h9KtMt5r1Ij7ODVRIZkwhk= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +zombiezen.com/go/capnproto2 v2.17.0+incompatible h1:sIoKPFGNlM38Qh+PBLa9Wzg1j99oInS/Qlk+5N/CHa4= +zombiezen.com/go/capnproto2 v2.17.0+incompatible/go.mod h1:XO5Pr2SbXgqZwn0m0Ru54QBqpOf4K5AYBO+8LAOBQEQ= diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/internal/pkg/performance/metrics.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/internal/pkg/performance/metrics.go new file mode 100644 index 00000000..8f2c5f37 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/internal/pkg/performance/metrics.go @@ -0,0 +1,196 @@ +package performance + +import ( + "fmt" + "io" + "log" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/pkg/errors" + "github.com/rcrowley/go-metrics" + "github.com/v3io/v3io-tsdb/pkg/config" +) + +const ( + reservoirSize = 100 +) + +var instance *MetricReporter +var once sync.Once + +const ( + STDOUT = "stdout" + STDERR = "stderr" +) + +type MetricReporter struct { + lock sync.Mutex + running bool + registry metrics.Registry + logWriter io.Writer + reportPeriodically bool + reportIntervalSeconds int + reportOnShutdown bool +} + +func DefaultReporterInstance() (reporter *MetricReporter, err error) { + cfg, err := config.GetOrDefaultConfig() + + if err != nil { + // DO NOT return the error to prevent failures of unit tests + fmt.Fprintf(os.Stderr, "unable to load configuration. 
Reason: %v\n"+ + "Will use default reporter configuration instead.", err) + reporter = ReporterInstance(STDOUT, true, 60, true) + } else { + reporter = ReporterInstanceFromConfig(cfg) + } + + return reporter, nil +} + +func ReporterInstance(writeTo string, reportPeriodically bool, reportIntervalSeconds int, reportOnShutdown bool) *MetricReporter { + once.Do(func() { + var writer io.Writer + switch writeTo { + case STDOUT: + writer = os.Stdout + case STDERR: + writer = os.Stderr + default: + writer = os.Stdout + } + + instance = newMetricReporter(writer, reportPeriodically, reportIntervalSeconds, reportOnShutdown) + }) + return instance +} + +func ReporterInstanceFromConfig(config *config.V3ioConfig) *MetricReporter { + return ReporterInstance( + config.MetricsReporter.Output, + config.MetricsReporter.ReportPeriodically, + config.MetricsReporter.RepotInterval, + config.MetricsReporter.ReportOnShutdown) +} + +func (mr *MetricReporter) Start() error { + mr.lock.Lock() + defer mr.lock.Unlock() + + if mr.isEnabled() && !mr.running { + mr.running = true + } else { + return errors.Errorf("metric reporter is already running.") + } + + return nil +} + +func (mr *MetricReporter) Stop() error { + mr.lock.Lock() + defer mr.lock.Unlock() + + if mr.running { + mr.running = false + if mr.reportOnShutdown { + time.Sleep(300 * time.Millisecond) // postpone performance report on shutdown to avoid mixing with other log messages + metrics.WriteOnce(mr.registry, mr.logWriter) + } + mr.registry.UnregisterAll() + } else { + return errors.Errorf("can't stop metric reporter since it's not running.") + } + + return nil +} + +func (mr *MetricReporter) WithTimer(name string, body func()) { + if mr.isRunning() { + timer := metrics.GetOrRegisterTimer(name, mr.registry) + timer.Time(body) + } else { + body() + } +} + +func (mr *MetricReporter) IncrementCounter(name string, count int64) { + if mr.isRunning() { + counter := metrics.GetOrRegisterCounter(name, mr.registry) + counter.Inc(count) + } +} + +func (mr *MetricReporter) UpdateMeter(name string, count int64) { + if mr.isRunning() { + meter := metrics.GetOrRegisterMeter(name, mr.registry) + meter.Mark(count) + } +} + +func (mr *MetricReporter) UpdateHistogram(name string, value int64) { + if mr.isRunning() { + histogram := metrics.GetOrRegisterHistogram(name, mr.registry, metrics.NewUniformSample(reservoirSize)) + histogram.Update(value) + } +} + +// Listen to the SIGINT and SIGTERM +// SIGINT will listen to CTRL-C. +// SIGTERM will be caught if kill command executed. 
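+// On either signal, the hook below logs the signal and writes a final one-shot metrics report to the configured writer.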
+func (mr *MetricReporter) registerShutdownHook() { + var gracefulStop = make(chan os.Signal) + // Register for specific signals + signal.Notify(gracefulStop, syscall.SIGINT, syscall.SIGTERM) + + go func() { + sig := <-gracefulStop + _, err := mr.logWriter.Write([]byte(fmt.Sprintf("\n**************************\ncaught sig: %+v\n**************************\n", sig))) + if err == nil { + metrics.WriteOnce(mr.registry, mr.logWriter) + } + }() +} + +func newMetricReporter(outputWriter io.Writer, reportPeriodically bool, reportIntervalSeconds int, reportOnShutdown bool) *MetricReporter { + var writer io.Writer + + if outputWriter != nil { + writer = outputWriter + } else { + writer = os.Stderr + } + + reporter := MetricReporter{ + registry: metrics.NewPrefixedRegistry("v3io-tsdb -> "), + logWriter: writer, + running: true, + reportPeriodically: reportPeriodically, + reportIntervalSeconds: reportIntervalSeconds, + reportOnShutdown: reportOnShutdown, + } + + if reportPeriodically && reportIntervalSeconds > 0 { + // Log periodically + go metrics.Log(reporter.registry, + time.Duration(reportIntervalSeconds)*time.Second, + log.New(reporter.logWriter, "metrics: ", log.Lmicroseconds)) + } + + if reportOnShutdown { + reporter.registerShutdownHook() + } + + return &reporter +} + +func (mr *MetricReporter) isEnabled() bool { + return mr.reportOnShutdown || mr.reportPeriodically +} + +func (mr *MetricReporter) isRunning() bool { + return false +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregate.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregate.go new file mode 100644 index 00000000..64d85d16 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregate.go @@ -0,0 +1,419 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. 
+*/ + +package aggregate + +import ( + "fmt" + "math" + "strings" + + "github.com/v3io/v3io-tsdb/pkg/config" +) + +type AggrType uint16 + +// Aggregation functions +const ( + AggregateLabel = "Aggregate" + CrossSeriesSuffix = "_all" + + aggrTypeNone AggrType = 0 + aggrTypeCount AggrType = 1 + aggrTypeSum AggrType = 2 + aggrTypeSqr AggrType = 4 + aggrTypeMax AggrType = 8 + aggrTypeMin AggrType = 16 + aggrTypeLast AggrType = 32 + + // Derived aggregates + aggrTypeAvg AggrType = aggrTypeCount | aggrTypeSum + aggrTypeRate AggrType = aggrTypeLast | 0x8000 + aggrTypeStddev AggrType = aggrTypeCount | aggrTypeSum | aggrTypeSqr + aggrTypeStdvar AggrType = aggrTypeCount | aggrTypeSum | aggrTypeSqr | 0x8000 + aggrTypeAll AggrType = 0xffff +) + +var rawAggregates = []AggrType{aggrTypeCount, aggrTypeSum, aggrTypeSqr, aggrTypeMax, aggrTypeMin, aggrTypeLast} +var rawAggregatesMask = aggrTypeCount | aggrTypeSum | aggrTypeSqr | aggrTypeMax | aggrTypeMin | aggrTypeLast + +var aggrTypeString = map[string]AggrType{ + "count": aggrTypeCount, "sum": aggrTypeSum, "sqr": aggrTypeSqr, "max": aggrTypeMax, "min": aggrTypeMin, + "last": aggrTypeLast, "avg": aggrTypeAvg, "rate": aggrTypeRate, + "stddev": aggrTypeStddev, "stdvar": aggrTypeStdvar, "*": aggrTypeAll} + +var aggrToString = map[AggrType]string{ + aggrTypeCount: "count", aggrTypeSum: "sum", aggrTypeSqr: "sqr", aggrTypeMin: "min", aggrTypeMax: "max", + aggrTypeLast: "last", aggrTypeAvg: "avg", aggrTypeRate: "rate", + aggrTypeStddev: "stddev", aggrTypeStdvar: "stdvar", aggrTypeAll: "*", +} + +var aggrToSchemaField = map[string]config.SchemaField{ + "count": {Name: "count", Type: "array", Nullable: true, Items: "double"}, + "sum": {Name: "sum", Type: "array", Nullable: true, Items: "double"}, + "sqr": {Name: "sqr", Type: "array", Nullable: true, Items: "double"}, + "max": {Name: "max", Type: "array", Nullable: true, Items: "double"}, + "min": {Name: "min", Type: "array", Nullable: true, Items: "double"}, + "last": {Name: "last", Type: "array", Nullable: true, Items: "double"}, + "avg": {Name: "avg", Type: "array", Nullable: true, Items: "double"}, + "rate": {Name: "rate", Type: "array", Nullable: true, Items: "double"}, + "stddev": {Name: "stddev", Type: "array", Nullable: true, Items: "double"}, + "stdvar": {Name: "stdvar", Type: "array", Nullable: true, Items: "double"}, +} + +func (a AggrType) HasAverage() bool { + return (a & aggrTypeAvg) == aggrTypeAvg +} + +func SchemaFieldFromString(aggregates []string, col string) ([]config.SchemaField, error) { + fieldList := make([]config.SchemaField, 0, len(aggregates)) + for _, s := range aggregates { + trimmed := strings.TrimSpace(s) + if trimmed != "" { + if trimmed == "*" { + fieldList = make([]config.SchemaField, 0, len(aggrToSchemaField)) + for _, val := range aggrToSchemaField { + fieldList = append(fieldList, getAggrFullName(val, col)) + } + } else { + field, ok := aggrToSchemaField[trimmed] + if !ok { + return nil, fmt.Errorf("invalid aggragator type '%s'", trimmed) + } + fieldList = append(fieldList, getAggrFullName(field, col)) + } + } + } + return fieldList, nil +} + +func getAggrFullName(field config.SchemaField, col string) config.SchemaField { + fullName := fmt.Sprintf("_%s_%s", col, field.Name) + field.Name = fullName + return field +} + +func (a AggrType) String() string { return aggrToString[a] } + +func RawAggregatesToStringList(aggregates string) ([]string, error) { + aggrs := strings.Split(aggregates, ",") + aggType, _, err := AggregatesFromStringListWithCount(aggrs) + if err != nil { + return 
nil, err + } + var list []string + for _, aggr := range rawAggregates { + if aggr&aggType != 0 { + list = append(list, aggrToString[aggr]) + } + } + + return list, nil +} + +func ParseCrossLabelSets(str string) [][]string { + var res [][]string + labelSetStrings := strings.Split(str, ";") + for _, labelSetString := range labelSetStrings { + labelSet := strings.Split(strings.TrimSpace(labelSetString), ",") + var trimmedLabelSet []string + for _, label := range labelSet { + trimmedLabel := strings.TrimSpace(label) + if trimmedLabel != "" { + trimmedLabelSet = append(trimmedLabelSet, trimmedLabel) + } + } + if len(trimmedLabelSet) > 0 { + res = append(res, trimmedLabelSet) + } + } + return res +} + +// Convert a comma-separated aggregation-functions string to an aggregates mask +func AggregatesFromStringListWithCount(split []string) (AggrType, []AggrType, error) { + var aggrMask AggrType + var aggrList []AggrType + + var hasAggregates bool + for _, s := range split { + aggr, err := FromString(s) + if err != nil { + return 0, nil, err + } + if aggr != 0 { + hasAggregates = true + aggrMask = aggrMask | aggr + aggrList = append(aggrList, aggr) + } + } + // Always have count aggregate by default + if hasAggregates { + aggrMask = aggrMask | aggrTypeCount + aggrList = append(aggrList, aggrTypeCount) + } + return aggrMask, aggrList, nil +} + +func FromString(aggrString string) (AggrType, error) { + trimmed := strings.TrimSpace(aggrString) + if trimmed == "" { + return 0, nil + } + aggr, ok := aggrTypeString[trimmed] + if !ok { + return 0, fmt.Errorf("invalid aggragate type: %v", trimmed) + } + return aggr, nil +} + +// Create a list of aggregate objects from an aggregates mask +func NewAggregatesList(aggrType AggrType) *AggregatesList { + list := AggregatesList{} + if (aggrType & aggrTypeCount) != 0 { + list = append(list, &CountAggregate{}) + } + if (aggrType & aggrTypeSum) != 0 { + list = append(list, &SumAggregate{FloatAggregate{attr: "sum"}}) + } + if (aggrType & aggrTypeSqr) != 0 { + list = append(list, &SqrAggregate{FloatAggregate{attr: "sqr"}}) + } + if (aggrType & aggrTypeMin) != 0 { + list = append(list, &MinAggregate{FloatAggregate{attr: "min", val: math.Inf(1)}}) + } + if (aggrType & aggrTypeMax) != 0 { + list = append(list, &MaxAggregate{FloatAggregate{attr: "max", val: math.Inf(-1)}}) + } + if (aggrType & aggrTypeLast) != 0 { + list = append(list, &LastAggregate{FloatAggregate{attr: "last", val: math.Inf(-1)}, 0}) + } + return &list +} + +// List of aggregates +type AggregatesList []Aggregate + +// Append a value to all aggregates +func (a AggregatesList) Aggregate(t int64, val interface{}) { + v, ok := val.(float64) + if !ok { + return + } + for _, aggr := range a { + aggr.Aggregate(t, v) + } +} + +// Return an update expression for the aggregates in the given aggregates list +func (a AggregatesList) UpdateExpr(col string, bucket int) string { + expr := "" + for _, aggr := range a { + expr = expr + aggr.UpdateExpr(col, bucket) + } + return expr +} + +// Return an aggregates set expression (first value) or update expression +func (a AggregatesList) SetOrUpdateExpr(col string, bucket int, isNew bool) string { + if isNew { + return a.SetExpr(col, bucket) + } + return a.UpdateExpr(col, bucket) +} + +func (a AggregatesList) SetExpr(col string, bucket int) string { + expr := "" + for _, aggr := range a { + expr = expr + aggr.SetExpr(col, bucket) + } + return expr +} + +// Return an aggregates array-initialization expression +func (a AggregatesList) InitExpr(col string, buckets int) string { 
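+	// Each aggregate contributes one init_array(...) assignment; the fragments are simply concatenated.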
+ expr := "" + for _, aggr := range a { + expr = expr + aggr.InitExpr(col, buckets) + } + return expr +} + +// Clear all aggregates +func (a AggregatesList) Clear() { + for _, aggr := range a { + aggr.Clear() + } +} + +func GetHiddenAggregates(mask AggrType, requestedAggregates []AggrType) []AggrType { + var hiddenAggregates []AggrType + + for _, aggr := range rawAggregates { + if aggr&mask == aggr && !ContainsAggregate(requestedAggregates, aggr) { + hiddenAggregates = append(hiddenAggregates, aggr) + } + } + return hiddenAggregates +} + +func GetHiddenAggregatesWithCount(mask AggrType, requestedAggregates []AggrType) []AggrType { + mask |= aggrTypeCount + return GetHiddenAggregates(mask, requestedAggregates) +} + +func ContainsAggregate(list []AggrType, item AggrType) bool { + for _, v := range list { + if v == item { + return true + } + } + return false +} + +func IsRawAggregate(item AggrType) bool { return ContainsAggregate(rawAggregates, item) } + +func IsCountAggregate(aggr AggrType) bool { return aggr == aggrTypeCount } + +func HasAggregates(mask AggrType) bool { return mask != aggrTypeNone } + +func MaskToString(mask AggrType) string { + var output strings.Builder + aggCount := 0 + for _, raw := range rawAggregates { + if mask&raw == raw { + if aggCount != 0 { + output.WriteString(",") + } + output.WriteString(aggrToString[raw]) + aggCount++ + } + } + + return output.String() +} + +func ToAttrName(aggr AggrType) string { + return config.AggregateAttrPrefix + aggr.String() +} + +func GetServerAggregationsFunction(aggr AggrType) (func(interface{}, interface{}) interface{}, error) { + switch aggr { + case aggrTypeCount: + return func(old, next interface{}) interface{} { + if old == nil { + return next + } + return old.(float64) + next.(float64) + }, nil + case aggrTypeSum: + return func(old, next interface{}) interface{} { + if old == nil { + return next + } + return old.(float64) + next.(float64) + }, nil + case aggrTypeSqr: + return func(old, next interface{}) interface{} { + if old == nil { + return next + } + return old.(float64) + next.(float64) + }, nil + case aggrTypeMin: + return func(old, next interface{}) interface{} { + if old == nil { + return next + } + return math.Min(old.(float64), next.(float64)) + }, nil + case aggrTypeMax: + return func(old, next interface{}) interface{} { + if old == nil { + return next + } + return math.Max(old.(float64), next.(float64)) + }, nil + case aggrTypeLast: + return func(_, next interface{}) interface{} { + return next + }, nil + default: + return nil, fmt.Errorf("unsupported server side aggregate %v", aggrToString[aggr]) + } +} + +func GetServerVirtualAggregationFunction(aggr AggrType) (func([]float64) float64, error) { + switch aggr { + case aggrTypeAvg: + return func(data []float64) float64 { + count := data[0] + sum := data[1] + return sum / count + }, nil + case aggrTypeStddev: + return func(data []float64) float64 { + count := data[0] + sum := data[1] + sqr := data[2] + return math.Sqrt((count*sqr - sum*sum) / (count * (count - 1))) + }, nil + case aggrTypeStdvar: + return func(data []float64) float64 { + count := data[0] + sum := data[1] + sqr := data[2] + return (count*sqr - sum*sum) / (count * (count - 1)) + }, nil + default: + return nil, fmt.Errorf("cannot aggregate %v", aggrToString[aggr]) + } +} + +func GetClientAggregationsFunction(aggr AggrType) (func(interface{}, interface{}) interface{}, error) { + switch aggr { + case aggrTypeCount: + return func(old, next interface{}) interface{} { + if old == nil { + return 1.0 + } + 
return old.(float64) + 1.0 + }, nil + case aggrTypeSqr: + return func(old, next interface{}) interface{} { + if old == nil { + return next.(float64) * next.(float64) + } + return old.(float64) + next.(float64)*next.(float64) + }, nil + default: + return GetServerAggregationsFunction(aggr) + } +} + +func GetDependantAggregates(aggr AggrType) []AggrType { + var aggregates []AggrType + for _, rawAggr := range rawAggregates { + if aggr&rawAggr == rawAggr { + aggregates = append(aggregates, rawAggr) + } + } + return aggregates +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregate_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregate_test.go new file mode 100644 index 00000000..72d158b1 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregate_test.go @@ -0,0 +1,144 @@ +// +build unit + +package aggregate + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +func TestAggregates(t *testing.T) { + testCases := []struct { + desc string + aggString string + data map[int64]float64 + exprCol string + bucket int + expectedUpdateExpr string + expectedSetExpr string + expectFail bool + ignoreReason string + }{ + {desc: "Should aggregate data with Count aggregate", + aggString: "count", + data: map[int64]float64{1: 7.5, 2: 2.5}, + exprCol: "v", bucket: 1, + expectedUpdateExpr: "_v_count[1]=_v_count[1]+2;", expectedSetExpr: "_v_count[1]=2;"}, + + {desc: "Should aggregate data with Sum aggregate", + aggString: "sum", + data: map[int64]float64{1: 7.5, 2: 2.5}, + exprCol: "v", bucket: 1, + expectedUpdateExpr: fmt.Sprintf("_v_sum[1]=_v_sum[1]+%s;_v_count[1]=_v_count[1]+2;", utils.FloatToNormalizedScientificStr(10.0)), + expectedSetExpr: fmt.Sprintf("_v_sum[1]=%s;_v_count[1]=2;", utils.FloatToNormalizedScientificStr(10.0))}, + + {desc: "Should aggregate data with Sqr aggregate", + aggString: "sqr", + data: map[int64]float64{1: 2.0}, + exprCol: "v", bucket: 1, + expectedUpdateExpr: fmt.Sprintf("_v_sqr[1]=_v_sqr[1]+%s;_v_count[1]=_v_count[1]+1;", utils.FloatToNormalizedScientificStr(4.0)), + expectedSetExpr: fmt.Sprintf("_v_sqr[1]=%s;_v_count[1]=1;", utils.FloatToNormalizedScientificStr(4.0))}, + + {desc: "Should aggregate data with Min & Max aggregates", + aggString: "min,max", + data: map[int64]float64{1: 7.5, 2: 2.5}, + exprCol: "v", bucket: 1, + expectedUpdateExpr: fmt.Sprintf("_v_min[1]=min(_v_min[1],%s);_v_max[1]=max(_v_max[1],%s);_v_count[1]=_v_count[1]+2;", + utils.FloatToNormalizedScientificStr(2.5), utils.FloatToNormalizedScientificStr(7.5)), + expectedSetExpr: fmt.Sprintf("_v_min[1]=%s;_v_max[1]=%s;_v_count[1]=2;", + utils.FloatToNormalizedScientificStr(2.5), + utils.FloatToNormalizedScientificStr(7.5))}, + + {desc: "Should aggregate data with Count,Sum,Sqr,Last aggregates", + aggString: "count,sum,sqr,last", + data: map[int64]float64{1: 7.5, 2: 2.5}, + exprCol: "v", bucket: 1, + expectedUpdateExpr: fmt.Sprintf("_v_count[1]=_v_count[1]+2;_v_sum[1]=_v_sum[1]+%s;_v_sqr[1]=_v_sqr[1]+%s;_v_last[1]=%s;", + utils.FloatToNormalizedScientificStr(10.0), utils.FloatToNormalizedScientificStr(62.5), + utils.FloatToNormalizedScientificStr(2.5)), + expectedSetExpr: fmt.Sprintf("_v_count[1]=2;_v_sum[1]=%s;_v_sqr[1]=%s;_v_last[1]=%s;", + utils.FloatToNormalizedScientificStr(10.0), + utils.FloatToNormalizedScientificStr(62.5), utils.FloatToNormalizedScientificStr(2.5))}, + + {desc: "Should aggregate data with Wildcard aggregates", + aggString: 
"*", + data: map[int64]float64{1: 7.5, 2: 2.5}, + exprCol: "v", bucket: 1, + expectedUpdateExpr: fmt.Sprintf("_v_count[1]=_v_count[1]+2;_v_sum[1]=_v_sum[1]+%s;"+ + "_v_sqr[1]=_v_sqr[1]+%s;_v_min[1]=min(_v_min[1],%s);_v_max[1]=max(_v_max[1],%s);"+ + "_v_last[1]=%s;", utils.FloatToNormalizedScientificStr(10.0), + utils.FloatToNormalizedScientificStr(62.5), + utils.FloatToNormalizedScientificStr(2.5), utils.FloatToNormalizedScientificStr(7.5), + utils.FloatToNormalizedScientificStr(2.5)), + expectedSetExpr: fmt.Sprintf("_v_count[1]=2;_v_sum[1]=%s;_v_sqr[1]=%s;"+ + "_v_min[1]=%s;_v_max[1]=%s;_v_last[1]=%s;", + utils.FloatToNormalizedScientificStr(10.0), utils.FloatToNormalizedScientificStr(62.5), + utils.FloatToNormalizedScientificStr(2.5), utils.FloatToNormalizedScientificStr(7.5), + utils.FloatToNormalizedScientificStr(2.5))}, + + {desc: "Should aggregate data with Bad aggregate", + aggString: "not-real", + data: map[int64]float64{1: 7.5, 2: 2.5}, + exprCol: "v", bucket: 1, + expectedUpdateExpr: "_v_count[1]=_v_count[1]+2;", expectedSetExpr: "_v_count[1]=2;", expectFail: true}, + + {desc: "Should aggregate data when specifying aggregates with sapces", + aggString: "min , max ", + data: map[int64]float64{1: 7.5, 2: 2.5}, + exprCol: "v", bucket: 1, + expectedUpdateExpr: fmt.Sprintf("_v_min[1]=min(_v_min[1],%s);_v_max[1]=max(_v_max[1],%s);_v_count[1]=_v_count[1]+2;", + utils.FloatToNormalizedScientificStr(2.5), utils.FloatToNormalizedScientificStr(7.5)), + expectedSetExpr: fmt.Sprintf("_v_min[1]=%s;_v_max[1]=%s;_v_count[1]=2;", + utils.FloatToNormalizedScientificStr(2.5), utils.FloatToNormalizedScientificStr(7.5))}, + + {desc: "Should aggregate data when specifying aggregates with empty values", + aggString: "min , ,max ", + data: map[int64]float64{1: 7.5, 2: 2.5}, + exprCol: "v", bucket: 1, + expectedUpdateExpr: fmt.Sprintf("_v_min[1]=min(_v_min[1],%s);_v_max[1]=max(_v_max[1],%s);_v_count[1]=_v_count[1]+2;", + utils.FloatToNormalizedScientificStr(2.5), utils.FloatToNormalizedScientificStr(7.5)), + expectedSetExpr: fmt.Sprintf("_v_min[1]=%s;_v_max[1]=%s;_v_count[1]=2;", + utils.FloatToNormalizedScientificStr(2.5), utils.FloatToNormalizedScientificStr(7.5))}, + } + + for _, test := range testCases { + t.Logf("%s\n", test.desc) + t.Run(test.desc, func(t *testing.T) { + if test.ignoreReason != "" { + t.Skip(test.ignoreReason) + } + testAggregateCase(t, test.aggString, test.data, test.exprCol, test.bucket, test.expectedUpdateExpr, + test.expectedSetExpr, test.expectFail) + }) + } +} + +func testAggregateCase(t *testing.T, aggString string, data map[int64]float64, exprCol string, bucket int, + expectedUpdateExpr string, expectedSetExpr string, expectFail bool) { + + aggregates, _, err := AggregatesFromStringListWithCount(strings.Split(aggString, ",")) + if err != nil { + if !expectFail { + t.Fatal(err) + } else { + return + } + } + aggregatesList := NewAggregatesList(aggregates) + + for k, v := range data { + aggregatesList.Aggregate(k, v) + } + + actualUpdateExpr := strings.Split(aggregatesList.UpdateExpr(exprCol, bucket), ";") + expectedUpdateExprSet := strings.Split(expectedUpdateExpr, ";") + assert.ElementsMatch(t, actualUpdateExpr, expectedUpdateExprSet) + + actualSetExpr := strings.Split(aggregatesList.SetExpr(exprCol, bucket), ";") + expectedSetExprSet := strings.Split(expectedSetExpr, ";") + assert.ElementsMatch(t, actualSetExpr, expectedSetExprSet) +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregationParams.go 
b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregationParams.go new file mode 100644 index 00000000..94c6f11d --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregationParams.go @@ -0,0 +1,90 @@ +package aggregate + +import ( + "fmt" + "strings" +) + +type AggregationParams struct { + colName string // column name ("v" in timeseries) + aggrMask AggrType // the sum of aggregates (or between all aggregates) + rollupTime int64 // time per bucket (cell in the array) + Interval int64 // requested (query) aggregation step + buckets int // number of buckets in the array + overlapWindows []int // a list of overlapping windows (* interval), e.g. last 1hr, 6hr, 12hr, 24hr + aggregationWindow int64 // a time window on which to calculate the aggregation per Interval + disableClientAggregation bool + useServerAggregateCoefficient int +} + +func NewAggregationParams(functions, col string, + buckets int, + interval, aggregationWindow, rollupTime int64, + windows []int, + disableClientAggregation bool, + useServerAggregateCoefficient int) (*AggregationParams, error) { + + aggregatesList := strings.Split(functions, ",") + aggrMask, _, err := AggregatesFromStringListWithCount(aggregatesList) + if err != nil { + return nil, err + } + + newAggregateSeries := AggregationParams{ + aggrMask: aggrMask, + colName: col, + buckets: buckets, + rollupTime: rollupTime, + aggregationWindow: aggregationWindow, + Interval: interval, + overlapWindows: windows, + disableClientAggregation: disableClientAggregation, + useServerAggregateCoefficient: useServerAggregateCoefficient, + } + + return &newAggregateSeries, nil +} + +func (as *AggregationParams) CanAggregate(partitionAggr AggrType) bool { + // Get only the raw aggregates from what the user requested + aggrMask := rawAggregatesMask & as.aggrMask + // make sure the DB has all the aggregates we need (on bits in the mask) + // and that the requested interval is greater/eq to aggregate resolution and is an even divisor + // if interval and rollup are not even divisors we need higher resolution (3x) to smooth the graph + // when we add linear/spline graph projection we can reduce back to 1x + return ((aggrMask & partitionAggr) == aggrMask) && + (as.Interval/as.rollupTime > int64(as.useServerAggregateCoefficient) || (as.Interval == as.rollupTime && as.disableClientAggregation)) && + (as.aggregationWindow == 0 || as.aggregationWindow >= as.rollupTime) +} + +func (as *AggregationParams) GetAggrMask() AggrType { + return as.aggrMask +} + +func (as *AggregationParams) GetRollupTime() int64 { + return as.rollupTime +} + +func (as *AggregationParams) GetAggregationWindow() int64 { + return as.aggregationWindow +} + +func (as *AggregationParams) HasAggregationWindow() bool { + return as.aggregationWindow > 0 +} + +func (as *AggregationParams) toAttrName(aggr AggrType) string { + return fmt.Sprintf("_%v_%v", as.colName, aggr.String()) +} + +func (as *AggregationParams) GetAttrNames() []string { + var names []string + + for _, aggr := range rawAggregates { + if aggr&as.aggrMask != 0 { + names = append(names, as.toAttrName(aggr)) + } + } + + return names +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/functions.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/functions.go new file mode 100644 index 00000000..7cc2b73d --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/functions.go @@ -0,0 +1,151 @@ +/* +Copyright 2018 Iguazio Systems Ltd. 
+ +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package aggregate + +import ( + "fmt" + "math" + + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type Aggregate interface { + Aggregate(t int64, v float64) + Clear() + GetAttr() string + UpdateExpr(col string, bucket int) string + SetExpr(col string, bucket int) string + InitExpr(col string, buckets int) string +} + +// Count aggregate +type CountAggregate struct { + count int +} + +func (a *CountAggregate) Aggregate(t int64, v float64) { a.count++ } +func (a *CountAggregate) Clear() { a.count = 0 } +func (a *CountAggregate) GetAttr() string { return "count" } + +func (a *CountAggregate) UpdateExpr(col string, bucket int) string { + return fmt.Sprintf("_%s_count[%d]=_%s_count[%d]+%d;", col, bucket, col, bucket, a.count) +} + +func (a *CountAggregate) SetExpr(col string, bucket int) string { + return fmt.Sprintf("_%s_count[%d]=%d;", col, bucket, a.count) +} + +func (a *CountAggregate) InitExpr(col string, buckets int) string { + return fmt.Sprintf("_%s_count=init_array(%d,'int');", col, buckets) +} + +// base float64 Aggregate +type FloatAggregate struct { + val float64 + attr string +} + +func (a *FloatAggregate) Clear() { a.val = 0 } +func (a *FloatAggregate) GetAttr() string { return a.attr } +func (a *FloatAggregate) GetVal() float64 { return a.val } +func (a *FloatAggregate) SetExpr(col string, bucket int) string { + return fmt.Sprintf("_%s_%s[%d]=%s;", col, a.attr, bucket, utils.FloatToNormalizedScientificStr(a.val)) +} + +func (a *FloatAggregate) UpdateExpr(col string, bucket int) string { + return fmt.Sprintf("_%s_%s[%d]=_%s_%s[%d]+%s;", col, a.attr, bucket, col, a.attr, bucket, + utils.FloatToNormalizedScientificStr(a.val)) +} + +func (a *FloatAggregate) InitExpr(col string, buckets int) string { + return fmt.Sprintf("_%s_%s=init_array(%d,'double',%f);", col, a.attr, buckets, a.val) +} + +// Sum Aggregate +type SumAggregate struct{ FloatAggregate } + +func (a *SumAggregate) Aggregate(t int64, v float64) { + if utils.IsDefined(v) { + a.val += v + } +} + +// Power of 2 Aggregate +type SqrAggregate struct{ FloatAggregate } + +func (a *SqrAggregate) Aggregate(t int64, v float64) { + if utils.IsDefined(v) { + a.val += v * v + } +} + +// Minimum Aggregate +type MinAggregate struct{ FloatAggregate } + +func (a *MinAggregate) Clear() { a.val = math.Inf(1) } + +func (a *MinAggregate) Aggregate(t int64, v float64) { + if v < a.val { + a.val = v + } +} +func (a *MinAggregate) UpdateExpr(col string, bucket int) string { + return fmt.Sprintf("_%s_%s[%d]=min(_%s_%s[%d],%s);", col, a.attr, bucket, col, a.attr, bucket, + utils.FloatToNormalizedScientificStr(a.val)) +} + +// Maximum Aggregate +type MaxAggregate struct{ FloatAggregate } + +func (a *MaxAggregate) Clear() { a.val = math.Inf(-1) } + 
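+// Clearing to -Inf guarantees that the first real sample always becomes the new maximum.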
+func (a *MaxAggregate) Aggregate(t int64, v float64) { + if v > a.val { + a.val = v + } +} +func (a *MaxAggregate) UpdateExpr(col string, bucket int) string { + return fmt.Sprintf("_%s_%s[%d]=max(_%s_%s[%d],%s);", col, a.attr, bucket, col, a.attr, bucket, + utils.FloatToNormalizedScientificStr(a.val)) +} + +// Last value Aggregate +type LastAggregate struct { + FloatAggregate + lastT int64 +} + +func (a *LastAggregate) Clear() { a.val = math.Inf(-1) } + +func (a *LastAggregate) Aggregate(t int64, v float64) { + if t > a.lastT { + a.val = v + a.lastT = t + } +} + +func (a *LastAggregate) UpdateExpr(col string, bucket int) string { + if utils.IsUndefined(a.val) { + return "" + } + + return fmt.Sprintf("_%s_%s[%d]=%s;", col, a.attr, bucket, utils.FloatToNormalizedScientificStr(a.val)) +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/iterator.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/iterator.go new file mode 100644 index 00000000..f8699148 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/iterator.go @@ -0,0 +1,393 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package aggregate + +import ( + "fmt" + "math" + "strings" + + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +// Local cache of init arrays per aggregate type. Used to mimic memcopy and initialize data arrays with specific values +var initDataArrayCache = map[AggrType][]float64{} + +type Series struct { + colName string // column name ("v" in timeseries) + functions []AggrType // list of aggregation functions to return (count, avg, sum, ..) + aggrMask AggrType // the sum of aggregates (or between all aggregates) + rollupTime int64 // time per bucket (cell in the array) + interval int64 // requested (query) aggregation step + buckets int // number of buckets in the array + overlapWindows []int // a list of overlapping windows (* interval), e.g. 
last 1hr, 6hr, 12hr, 24hr +} + +func NewAggregateSeries(functions, col string, buckets int, interval, rollupTime int64, windows []int) (*Series, error) { + + split := strings.Split(functions, ",") + var aggrMask AggrType + var aggrList []AggrType + + for _, s := range split { + aggr, ok := aggrTypeString[s] + if !ok { + return nil, fmt.Errorf("invalid aggragator type %s", s) + } + aggrMask = aggrMask | aggr + aggrList = append(aggrList, aggr) + } + + // Always have count Aggregate by default + if aggrMask != 0 { + aggrMask |= aggrTypeCount + } + + newAggregateSeries := Series{ + aggrMask: aggrMask, + functions: aggrList, + colName: col, + buckets: buckets, + rollupTime: rollupTime, + interval: interval, + overlapWindows: windows, + } + + return &newAggregateSeries, nil +} + +func (as *Series) CanAggregate(partitionAggr AggrType) bool { + // keep only real aggregates + aggrMask := 0x7f & as.aggrMask + // make sure the DB has all the aggregates we need (on bits in the mask) + // and that the requested interval is greater/eq to aggregate resolution and is an even divisor + // if interval and rollup are not even divisors we need higher resolution (3x) to smooth the graph + // when we add linear/spline graph projection we can reduce back to 1x + return ((aggrMask & partitionAggr) == aggrMask) && + as.interval >= as.rollupTime && (as.interval%as.rollupTime == 0 || as.interval/as.rollupTime > 3) +} + +func (as *Series) GetAggrMask() AggrType { + return as.aggrMask +} + +func (as *Series) GetFunctions() []AggrType { + return as.functions +} + +func (as *Series) NumFunctions() int { + return len(as.functions) +} + +func (as *Series) toAttrName(aggr AggrType) string { + return "_" + as.colName + "_" + aggr.String() +} + +func (as *Series) GetAttrNames() []string { + var names []string + + for _, aggr := range rawAggregates { + if aggr&as.aggrMask != 0 { + names = append(names, as.toAttrName(aggr)) + } + } + + return names +} + +// create new aggregation set from v3io aggregation array attributes +func (as *Series) NewSetFromAttrs( + length, start, end int, mint, maxt int64, attrs *map[string]interface{}) (*Set, error) { + + aggrArrays := map[AggrType][]uint64{} + dataArrays := map[AggrType][]float64{} + + var maxAligned int64 + if as.overlapWindows != nil { + length = len(as.overlapWindows) + maxAligned = (maxt / as.interval) * as.interval + } + + for _, aggr := range rawAggregates { + if aggr&as.aggrMask != 0 { + attrBlob, ok := (*attrs)[as.toAttrName(aggr)] + if !ok { + return nil, fmt.Errorf("aggregation attribute %s was not found", as.toAttrName(aggr)) + } + aggrArrays[aggr] = utils.AsInt64Array(attrBlob.([]byte)) + + dataArrays[aggr] = make([]float64, length, length) + copy(dataArrays[aggr], getOrCreateInitDataArray(aggr, length)) + } + } + + aggrSet := Set{length: length, interval: as.interval, overlapWin: as.overlapWindows} + aggrSet.dataArrays = dataArrays + + arrayIndex := start + i := 0 + + for arrayIndex != end { + + if as.overlapWindows == nil { + + // standard aggregates (evenly spaced intervals) + cellIndex := int((int64(i) * as.rollupTime) / as.interval) + for aggr, array := range aggrArrays { + aggrSet.mergeArrayCell(aggr, cellIndex, array[arrayIndex]) + } + } else { + + // overlapping time windows (last 1hr, 6hr, ..) 
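+			// each requested window gets its own output cell; a sample is merged into every
+			// window wide enough to reach back to its timestamp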
+ t := mint + (int64(i) * as.rollupTime) + if t < maxAligned { + for i, win := range as.overlapWindows { + if t > maxAligned-int64(win)*as.interval { + for aggr, array := range aggrArrays { + aggrSet.mergeArrayCell(aggr, i, array[arrayIndex]) + } + } + } + } + + } + + i++ + arrayIndex = (arrayIndex + 1) % (as.buckets + 1) + } + + return &aggrSet, nil +} + +// prepare new aggregation set from v3io raw chunk attributes (in case there are no aggregation arrays) +func (as *Series) NewSetFromChunks(length int) *Set { + + if as.overlapWindows != nil { + length = len(as.overlapWindows) + } + + newAggregateSet := Set{length: length, interval: as.interval, overlapWin: as.overlapWindows} + dataArrays := map[AggrType][]float64{} + + for _, aggr := range rawAggregates { + if aggr&as.aggrMask != 0 { + dataArrays[aggr] = make([]float64, length, length) // TODO: len/capacity & reuse (pool) + initArray := getOrCreateInitDataArray(aggr, length) + copy(dataArrays[aggr], initArray) + } + } + + newAggregateSet.dataArrays = dataArrays + return &newAggregateSet +} + +type Set struct { + dataArrays map[AggrType][]float64 + length int + maxCell int + baseTime int64 + interval int64 + overlapWin []int +} + +func (as *Set) GetMaxCell() int { + return as.maxCell +} + +// append the value to a cell in all relevant aggregation arrays +func (as *Set) AppendAllCells(cell int, val float64) { + + if !isValidCell(cell, as) { + return + } + + if cell > as.maxCell { + as.maxCell = cell + } + + for aggr := range as.dataArrays { + as.updateCell(aggr, cell, val) + } +} + +// append/merge server aggregation values into aggregation per requested interval/step +// if the requested step interval is higher than stored interval we need to collapse multiple cells to one +func (as *Set) mergeArrayCell(aggr AggrType, cell int, val uint64) { + + if cell >= as.length { + return + } + + if cell > as.maxCell { + as.maxCell = cell + } + + if aggr == aggrTypeCount { + as.dataArrays[aggr][cell] += float64(val) + } else { + float := math.Float64frombits(val) + // When getting already aggregated sqr aggregate we just need to sum. 
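+		// (routing it through updateCell would square the already-squared value again)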
+ if aggr == aggrTypeSqr { + as.dataArrays[aggr][cell] += float + } else { + as.updateCell(aggr, cell, float) + } + } +} + +func isValidCell(cellIndex int, aSet *Set) bool { + return cellIndex >= 0 && + cellIndex < aSet.length +} + +// function-specific aggregation +func (as *Set) updateCell(aggr AggrType, cell int, val float64) { + + if !isValidCell(cell, as) { + return + } + + cellValue := as.dataArrays[aggr][cell] + switch aggr { + case aggrTypeCount: + as.dataArrays[aggr][cell]++ + case aggrTypeSum: + as.dataArrays[aggr][cell] += val + case aggrTypeSqr: + as.dataArrays[aggr][cell] += val * val + case aggrTypeMin: + if val < cellValue { + as.dataArrays[aggr][cell] = val + } + case aggrTypeMax: + if val > cellValue { + as.dataArrays[aggr][cell] = val + } + case aggrTypeLast: + as.dataArrays[aggr][cell] = val + } +} + +// return the value per aggregate or complex function +func (as *Set) GetCellValue(aggr AggrType, cell int) (float64, bool) { + + if !isValidCell(cell, as) { + return math.NaN(), false + } + + dependsOnSum := aggr == aggrTypeStddev || aggr == aggrTypeStdvar || aggr == aggrTypeAvg + dependsOnSqr := aggr == aggrTypeStddev || aggr == aggrTypeStdvar + dependsOnLast := aggr == aggrTypeLast || aggr == aggrTypeRate + + // return an undefined result if one of the dependent fields is missing + if (dependsOnSum && utils.IsUndefined(as.dataArrays[aggrTypeSum][cell])) || + (dependsOnSqr && utils.IsUndefined(as.dataArrays[aggrTypeSqr][cell]) || + (dependsOnLast && utils.IsUndefined(as.dataArrays[aggrTypeLast][cell]))) { + return math.NaN(), false + } + + // if no samples in this bucket the result is undefined + var cnt float64 + if dependsOnSum { + cnt = as.dataArrays[aggrTypeCount][cell] + if cnt == 0 { + return math.NaN(), false + } + } + + switch aggr { + case aggrTypeAvg: + return as.dataArrays[aggrTypeSum][cell] / cnt, true + case aggrTypeStddev: + sum := as.dataArrays[aggrTypeSum][cell] + sqr := as.dataArrays[aggrTypeSqr][cell] + return math.Sqrt((cnt*sqr - sum*sum) / (cnt * (cnt - 1))), true + case aggrTypeStdvar: + sum := as.dataArrays[aggrTypeSum][cell] + sqr := as.dataArrays[aggrTypeSqr][cell] + return (cnt*sqr - sum*sum) / (cnt * (cnt - 1)), true + case aggrTypeRate: + if cell == 0 { + return math.NaN(), false + } + // TODO: need to clarify the meaning of this type of aggregation. IMHO, rate has meaning for monotonic counters only + last := as.dataArrays[aggrTypeLast][cell-1] + this := as.dataArrays[aggrTypeLast][cell] + return (this - last) / float64(as.interval/1000), true // rate per sec + default: + return as.dataArrays[aggr][cell], true + } +} + +// get the time per aggregate cell +func (as *Set) GetCellTime(base int64, index int) int64 { + if as.overlapWin == nil { + return base + int64(index)*as.interval + } + + if index >= len(as.overlapWin) { + return base + } + + return base - int64(as.overlapWin[index])*as.interval +} + +func (as *Set) Clear() { + as.maxCell = 0 + for aggr := range as.dataArrays { + initArray := getOrCreateInitDataArray(aggr, len(as.dataArrays[0])) + copy(as.dataArrays[aggr], initArray) + } +} + +// Check if cell has data. 
Assumes that count is always present +func (as *Set) HasData(cell int) bool { + return as.dataArrays[aggrTypeCount][cell] > 0 +} + +func getOrCreateInitDataArray(aggrType AggrType, length int) []float64 { + // Create once or override if required size is greater than existing array + if initDataArrayCache[aggrType] == nil || len(initDataArrayCache[aggrType]) < length { + initDataArrayCache[aggrType] = createInitDataArray(aggrType, length) + } + return initDataArrayCache[aggrType] +} + +func createInitDataArray(aggrType AggrType, length int) []float64 { + // Prepare "clean" array for fastest reset of "uninitialized" data arrays + resultArray := make([]float64, length, length) + + var initWith float64 + switch aggrType { + case aggrTypeMin: + initWith = math.Inf(1) + case aggrTypeMax: + initWith = math.Inf(-1) + default: + // NOP - default is 0 + } + + for i := range resultArray { + resultArray[i] = initWith + } + + return resultArray +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go new file mode 100644 index 00000000..26fd9ea5 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go @@ -0,0 +1,351 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. 
+*/ + +package appender + +import ( + "fmt" + "sync" + "time" + + "github.com/nuclio/logger" + "github.com/pkg/errors" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-tsdb/internal/pkg/performance" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/partmgr" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +// TODO: make configurable +const maxRetriesOnWrite = 3 +const channelSize = 4096 +const queueStallTime = 1 * time.Millisecond + +const minimalUnixTimeMs = 0 // year 1970 +const maxUnixTimeMs = 13569465600000 // year 2400 + +// To add: rollups policy (cnt, sum, min/max, sum^2) + interval, or a per-name policy label +type MetricState struct { + sync.RWMutex + state storeState + Lset utils.LabelsIfc + key string + name string + hash uint64 + refID uint64 + + aggrs []*MetricState + + store *chunkStore + err error + retryCount uint8 + newName bool + isVariant bool +} + +// Metric store states +type storeState uint8 + +const ( + storeStateInit storeState = 0 + storeStatePreGet storeState = 1 // Need to get state + storeStateGet storeState = 2 // Getting old state from storage + storeStateReady storeState = 3 // Ready to update + storeStateUpdate storeState = 4 // Update/write in progress +) + +// store is ready to update samples into the DB +func (m *MetricState) isReady() bool { + return m.state == storeStateReady +} + +func (m *MetricState) getState() storeState { + return m.state +} + +func (m *MetricState) setState(state storeState) { + m.state = state +} + +func (m *MetricState) setError(err error) { + m.err = err +} + +func (m *MetricState) error() error { + m.RLock() + defer m.RUnlock() + return m.err +} + +type cacheKey struct { + name string + hash uint64 +} + +// store the state and metadata for all the metrics +type MetricsCache struct { + cfg *config.V3ioConfig + partitionMngr *partmgr.PartitionManager + mtx sync.RWMutex + container v3io.Container + logger logger.Logger + started bool + + responseChan chan *v3io.Response + nameUpdateChan chan *v3io.Response + asyncAppendChan chan *asyncAppend + updatesInFlight int + + metricQueue *ElasticQueue + updatesComplete chan int + newUpdates chan int + + lastMetric uint64 + + // TODO: consider switching to sync.Map (https://golang.org/pkg/sync/#Map) + cacheMetricMap map[cacheKey]*MetricState // TODO: maybe use hash as key & combine w ref + cacheRefMap map[uint64]*MetricState // TODO: maybe turn to list + free list, periodically delete old metrics + + NameLabelMap map[string]bool // temp store of all label names + + lastError error + performanceReporter *performance.MetricReporter + + stopChan chan int +} + +func NewMetricsCache(container v3io.Container, logger logger.Logger, cfg *config.V3ioConfig, + partMngr *partmgr.PartitionManager) *MetricsCache { + + newCache := MetricsCache{container: container, logger: logger, cfg: cfg, partitionMngr: partMngr} + newCache.cacheMetricMap = map[cacheKey]*MetricState{} + newCache.cacheRefMap = map[uint64]*MetricState{} + + newCache.responseChan = make(chan *v3io.Response, channelSize) + newCache.nameUpdateChan = make(chan *v3io.Response, channelSize) + newCache.asyncAppendChan = make(chan *asyncAppend, channelSize) + + newCache.metricQueue = NewElasticQueue() + newCache.updatesComplete = make(chan int, 100) + newCache.newUpdates = make(chan int, 1000) + newCache.stopChan = make(chan int, 3) + + newCache.NameLabelMap = map[string]bool{} + newCache.performanceReporter = performance.ReporterInstanceFromConfig(cfg) + + return &newCache +} + +type asyncAppend struct { + 
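+	// a queued sample; WaitForCompletion enqueues one with a nil metric and a resp channel as a completion marker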
metric *MetricState + t int64 + v interface{} + resp chan int +} + +func (mc *MetricsCache) Start() error { + err := mc.start() + if err != nil { + return errors.Wrap(err, "Failed to start Appender loop") + } + + return nil +} + +// return metric struct by key +func (mc *MetricsCache) getMetric(name string, hash uint64) (*MetricState, bool) { + mc.mtx.RLock() + defer mc.mtx.RUnlock() + + metric, ok := mc.cacheMetricMap[cacheKey{name, hash}] + return metric, ok +} + +// create a new metric and save in the map +func (mc *MetricsCache) addMetric(hash uint64, name string, metric *MetricState) { + mc.mtx.Lock() + defer mc.mtx.Unlock() + + mc.lastMetric++ + metric.refID = mc.lastMetric + mc.cacheRefMap[mc.lastMetric] = metric + mc.cacheMetricMap[cacheKey{name, hash}] = metric + if _, ok := mc.NameLabelMap[name]; !ok { + metric.newName = true + mc.NameLabelMap[name] = true + } +} + +// return metric struct by refID +func (mc *MetricsCache) getMetricByRef(ref uint64) (*MetricState, bool) { + mc.mtx.RLock() + defer mc.mtx.RUnlock() + + metric, ok := mc.cacheRefMap[ref] + return metric, ok +} + +// Push append to async channel +func (mc *MetricsCache) appendTV(metric *MetricState, t int64, v interface{}) { + mc.asyncAppendChan <- &asyncAppend{metric: metric, t: t, v: v} +} + +// First time add time & value to metric (by label set) +func (mc *MetricsCache) Add(lset utils.LabelsIfc, t int64, v interface{}) (uint64, error) { + + err := verifyTimeValid(t) + if err != nil { + return 0, err + } + + var isValueVariantType bool + // If the value is not of Float type assume it's variant type. + switch v.(type) { + case int, int64, float64, float32: + isValueVariantType = false + default: + isValueVariantType = true + } + + name, key, hash := lset.GetKey() + err = utils.IsValidMetricName(name) + if err != nil { + return 0, err + } + metric, ok := mc.getMetric(name, hash) + + var aggrMetrics []*MetricState + if !ok { + for _, preAggr := range mc.partitionMngr.GetConfig().TableSchemaInfo.PreAggregates { + subLset := lset.Filter(preAggr.Labels) + name, key, hash := subLset.GetKey() + aggrMetric, ok := mc.getMetric(name, hash) + if !ok { + aggrMetric = &MetricState{Lset: subLset, key: key, name: name, hash: hash} + aggrMetric.store = newChunkStore(mc.logger, subLset.LabelNames(), true) + mc.addMetric(hash, name, aggrMetric) + aggrMetrics = append(aggrMetrics, aggrMetric) + } + } + metric = &MetricState{Lset: lset, key: key, name: name, hash: hash, + aggrs: aggrMetrics, isVariant: isValueVariantType} + + metric.store = newChunkStore(mc.logger, lset.LabelNames(), false) + mc.addMetric(hash, name, metric) + } else { + aggrMetrics = metric.aggrs + } + + err = metric.error() + metric.setError(nil) + + if isValueVariantType != metric.isVariant { + newValueType := "numeric" + if isValueVariantType { + newValueType = "string" + } + existingValueType := "numeric" + if metric.isVariant { + existingValueType = "string" + } + return 0, errors.Errorf("Cannot append %v type metric to %v type metric.", newValueType, existingValueType) + } + + mc.appendTV(metric, t, v) + for _, aggrMetric := range aggrMetrics { + mc.appendTV(aggrMetric, t, v) + } + + return metric.refID, err +} + +// fast Add to metric (by refID) +func (mc *MetricsCache) AddFast(ref uint64, t int64, v interface{}) error { + + err := verifyTimeValid(t) + if err != nil { + return err + } + + metric, ok := mc.getMetricByRef(ref) + if !ok { + mc.logger.ErrorWith("Ref not found", "ref", ref) + return fmt.Errorf("ref not found") + } + + err = metric.error() + 
metric.setError(nil) + + mc.appendTV(metric, t, v) + + for _, aggrMetric := range metric.aggrs { + mc.appendTV(aggrMetric, t, v) + } + + return err +} + +func verifyTimeValid(t int64) error { + if t > maxUnixTimeMs || t < minimalUnixTimeMs { + return fmt.Errorf("time '%d' doesn't seem to be a valid Unix timesamp in milliseconds. The time must be in the years range 1970-2400", t) + } + return nil +} +func (mc *MetricsCache) Close() { + //for 3 go funcs + mc.stopChan <- 0 + mc.stopChan <- 0 + mc.stopChan <- 0 +} + +func (mc *MetricsCache) WaitForCompletion(timeout time.Duration) (int, error) { + waitChan := make(chan int, 2) + mc.asyncAppendChan <- &asyncAppend{metric: nil, t: 0, v: 0, resp: waitChan} + + var maxWaitTime time.Duration + + if timeout == 0 { + maxWaitTime = 24 * time.Hour // Almost-infinite time + } else if timeout > 0 { + maxWaitTime = timeout + } else { + // If negative, use the default configured timeout value + maxWaitTime = time.Duration(mc.cfg.DefaultTimeoutInSeconds) * time.Second + } + + var resultCount int + var err error + + mc.performanceReporter.WithTimer("WaitForCompletionTimer", func() { + select { + case resultCount = <-waitChan: + err = mc.lastError + mc.lastError = nil + return + case <-time.After(maxWaitTime): + resultCount = 0 + err = errors.Errorf("The operation timed out after %.2f seconds.", maxWaitTime.Seconds()) + return + } + }) + + return resultCount, err +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/equeue.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/equeue.go new file mode 100644 index 00000000..615785b7 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/equeue.go @@ -0,0 +1,148 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. 
+*/ + +package appender + +import ( + "sync" +) + +const ListSize = 256 + +type list [ListSize]*MetricState + +type ElasticQueue struct { + mtx sync.RWMutex + data []*list + head, tail int +} + +// Elastic Queue, a fifo queue with dynamic resize +func NewElasticQueue() *ElasticQueue { + newQueue := ElasticQueue{} + newQueue.data = append(newQueue.data, &list{}) + return &newQueue +} + +// Is the queue empty +func (eq *ElasticQueue) IsEmpty() bool { + eq.mtx.Lock() + defer eq.mtx.Unlock() + + return eq.head == eq.tail +} + +// Number of elements in the queue +func (eq *ElasticQueue) Length() int { + eq.mtx.Lock() + defer eq.mtx.Unlock() + + return eq.length() +} + +func (eq *ElasticQueue) length() int { + if eq.head >= eq.tail { + return eq.head - eq.tail + } + + return eq.head + (len(eq.data) * ListSize) - eq.tail +} + +func (eq *ElasticQueue) Push(val *MetricState) int { + eq.mtx.Lock() + defer eq.mtx.Unlock() + + return eq.push(val) +} + +// Push a value to the queue +func (eq *ElasticQueue) push(val *MetricState) int { + headBlock, headOffset := eq.head/ListSize, eq.head%ListSize + tailBlock := eq.tail / ListSize + //wasEmpty := eq.head == eq.tail + + if headBlock == tailBlock-1 && headOffset == ListSize-1 { + eq.data = append(eq.data, &list{}) + copy(eq.data[tailBlock+1:], eq.data[tailBlock:]) + eq.data[tailBlock] = &list{} + + eq.tail += ListSize + + } + + if headBlock == len(eq.data)-1 && headOffset == ListSize-1 { + if tailBlock == 0 { + eq.data = append(eq.data, &list{}) + } + } + + eq.head = (eq.head + 1) % (len(eq.data) * ListSize) + eq.data[headBlock][headOffset] = val + return eq.length() +} + +func (eq *ElasticQueue) Pop() *MetricState { + eq.mtx.Lock() + defer eq.mtx.Unlock() + + return eq.pop() +} + +func (eq *ElasticQueue) PopN(length int) []*MetricState { + eq.mtx.Lock() + defer eq.mtx.Unlock() + var list []*MetricState + + for i := 0; i < length; i++ { + metric := eq.pop() + if metric != nil { + list = append(list, metric) + } else { + break + } + } + + return list +} + +// return the oldest value in the queue +func (eq *ElasticQueue) pop() *MetricState { + if eq.head == eq.tail { + return nil + } + + tailBlock, tailOffset := eq.tail/ListSize, eq.tail%ListSize + eq.tail = (eq.tail + 1) % (len(eq.data) * ListSize) + + return eq.data[tailBlock][tailOffset] +} + +// Atomic rotate, push a value to the tail and pop one from the head +func (eq *ElasticQueue) Rotate(val *MetricState) (*MetricState, int) { + eq.mtx.Lock() + defer eq.mtx.Unlock() + + if eq.head == eq.tail { + return val, 0 + } + + length := eq.push(val) + return eq.pop(), length +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go new file mode 100644 index 00000000..eb349e0e --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go @@ -0,0 +1,396 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. 
+ +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package appender + +import ( + "fmt" + "net/http" + "reflect" + "time" + + "github.com/pkg/errors" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-go/pkg/errors" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +// Start event loops for handling metric updates (appends and Get/Update DB responses) +// TODO: we can use multiple Go routines and spread the metrics across based on Hash LSB. +func (mc *MetricsCache) start() error { + + mc.nameUpdateRespLoop() + mc.metricsUpdateLoop(0) + mc.metricFeed(0) + + return nil +} + +// Read data from the append queue, push it into per-metric queues, and manage ingestion states +func (mc *MetricsCache) metricFeed(index int) { + + go func() { + inFlight := 0 + gotData := false + potentialCompletion := false + var completeChan chan int + + for { + select { + case _ = <-mc.stopChan: + return + case inFlight = <-mc.updatesComplete: + // Handle completion notifications from the update loop + length := mc.metricQueue.Length() + mc.logger.Debug(`Complete update cycle - "in-flight requests"=%d; "metric queue length"=%d\n`, inFlight, length) + + // If data was sent and the queue is empty, mark as completion + if length == 0 && gotData { + switch len(mc.asyncAppendChan) { + case 0: + potentialCompletion = true + if completeChan != nil { + completeChan <- 0 + } + case 1: + potentialCompletion = true + } + } + case app := <-mc.asyncAppendChan: + newMetrics := 0 + dataQueued := 0 + numPushed := 0 + inLoop: + for i := 0; i <= mc.cfg.BatchSize; i++ { + if app.metric == nil { + // Handle update completion requests (metric == nil) + completeChan = app.resp + if potentialCompletion { + completeChan <- 0 + } + } else { + potentialCompletion = false + // Handle append requests (Add / AddFast) + gotData = true + metric := app.metric + metric.Lock() + + metric.store.Append(app.t, app.v) + numPushed++ + dataQueued += metric.store.samplesQueueLength() + + // If there are no in-flight requests, add the metric to the queue and update state + if metric.isReady() || metric.getState() == storeStateInit { + + if metric.getState() == storeStateInit { + metric.setState(storeStatePreGet) + } + if metric.isReady() { + metric.setState(storeStateUpdate) + } + + length := mc.metricQueue.Push(metric) + if length < 2*mc.cfg.Workers { + newMetrics++ + } + } + metric.Unlock() + } + // Poll if we have more updates (accelerate the outer select) + if i < mc.cfg.BatchSize { + select { + case app = <-mc.asyncAppendChan: + default: + break inLoop + } + } + } + // Notify the update loop that there are new metrics to process + if newMetrics > 0 { + mc.newUpdates <- newMetrics + } + + // If we have too much work, stall the queue for some time + if numPushed > mc.cfg.BatchSize/2 && dataQueued/numPushed > 64 { + switch { + case dataQueued/numPushed <= 96: + time.Sleep(queueStallTime) + case dataQueued/numPushed > 96 && dataQueued/numPushed < 200: + time.Sleep(4 * queueStallTime) + default: + time.Sleep(10 * queueStallTime) + } + } + } + } + }() +} + +// An async loop that accepts new metric updates or responses from previous updates and makes new storage requests +func (mc *MetricsCache) metricsUpdateLoop(index int) { + + go func() { + counter := 0 + for { + select { + case _ = <-mc.stopChan: + return + case _ = <-mc.newUpdates: + // Handle new metric 
notifications (from metricFeed) + for mc.updatesInFlight < mc.cfg.Workers*2 { //&& newMetrics > 0{ + freeSlots := mc.cfg.Workers*2 - mc.updatesInFlight + metrics := mc.metricQueue.PopN(freeSlots) + for _, metric := range metrics { + mc.postMetricUpdates(metric) + } + if len(metrics) < freeSlots { + break + } + } + + if mc.updatesInFlight == 0 { + mc.logger.Debug("Complete new update cycle - in-flight %d.\n", mc.updatesInFlight) + mc.updatesComplete <- 0 + } + case resp := <-mc.responseChan: + // Handle V3IO async responses + nonQueued := mc.metricQueue.IsEmpty() + + inLoop: + for i := 0; i <= mc.cfg.BatchSize; i++ { + + mc.updatesInFlight-- + counter++ + if counter%3000 == 0 { + mc.logger.Debug("Handle response: inFly %d, Q %d", mc.updatesInFlight, mc.metricQueue.Length()) + } + metric := resp.Context.(*MetricState) + mc.handleResponse(metric, resp, nonQueued) + + // Poll if we have more responses (accelerate the outer select) + if i < mc.cfg.BatchSize { + select { + case resp = <-mc.responseChan: + default: + break inLoop + } + } + } + + // Post updates if we have queued metrics and the channel has room for more + for mc.updatesInFlight < mc.cfg.Workers*2 { + freeSlots := mc.cfg.Workers*2 - mc.updatesInFlight + metrics := mc.metricQueue.PopN(freeSlots) + if len(metrics) == 0 { + break + } + for _, metric := range metrics { + mc.postMetricUpdates(metric) + } + } + + // Notify the metric feeder when all in-flight tasks are done + if mc.updatesInFlight == 0 { + mc.logger.Debug("Return to feed. Metric queue length: %d", mc.metricQueue.Length()) + mc.updatesComplete <- 0 + } + } + } + }() +} + +// Send a request with chunk data to the DB +// If in the initial state, read metric metadata from the DB. +func (mc *MetricsCache) postMetricUpdates(metric *MetricState) { + + metric.Lock() + defer metric.Unlock() + var sent bool + var err error + + if metric.getState() == storeStatePreGet { + sent, err = metric.store.getChunksState(mc, metric) + if err != nil { + // Count errors + mc.performanceReporter.IncrementCounter("GetChunksStateError", 1) + + mc.logger.ErrorWith("Failed to get item state", "metric", metric.Lset, "err", err) + setError(mc, metric, err) + } else { + metric.setState(storeStateGet) + } + + } else { + sent, err = metric.store.writeChunks(mc, metric) + if err != nil { + // Count errors + mc.performanceReporter.IncrementCounter("WriteChunksError", 1) + + mc.logger.ErrorWith("Submit failed", "metric", metric.Lset, "err", err) + setError(mc, metric, errors.Wrap(err, "Chunk write submit failed.")) + } else if sent { + metric.setState(storeStateUpdate) + } + if !sent { + if metric.store.samplesQueueLength() == 0 { + metric.setState(storeStateReady) + } else { + if mc.metricQueue.length() > 0 { + mc.newUpdates <- mc.metricQueue.length() + } + } + } + } + + if sent { + mc.updatesInFlight++ + } +} + +// Handle DB responses +// If the backlog queue is empty and have data to send, write more chunks to the DB. 
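+// It returns true when handling the response resulted in another chunk-write request being submitted.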
+func (mc *MetricsCache) handleResponse(metric *MetricState, resp *v3io.Response, canWrite bool) bool { + defer resp.Release() + metric.Lock() + defer metric.Unlock() + + reqInput := resp.Request().Input + + if resp.Error != nil && metric.getState() != storeStateGet { + req := reqInput.(*v3io.UpdateItemInput) + mc.logger.DebugWith("I/O failure", "id", resp.ID, "err", resp.Error, "key", metric.key, + "in-flight", mc.updatesInFlight, "mqueue", mc.metricQueue.Length(), + "numsamples", metric.store.samplesQueueLength(), "path", req.Path, "update expression", req.Expression) + } else { + mc.logger.DebugWith("I/O response", "id", resp.ID, "err", resp.Error, "key", metric.key, "request type", + reflect.TypeOf(reqInput), "request", reqInput) + } + + if metric.getState() == storeStateGet { + // Handle Get response, sync metric state with the DB + metric.store.processGetResp(mc, metric, resp) + + } else { + // Handle Update Expression responses + if resp.Error == nil { + if !metric.store.isAggr() { + // Set fields so next write won't include redundant info (bytes, labels, init_array) + metric.store.ProcessWriteResp() + } + metric.retryCount = 0 + } else { + clear := func() { + resp.Release() + metric.store = newChunkStore(mc.logger, metric.Lset.LabelNames(), metric.store.isAggr()) + metric.retryCount = 0 + metric.setState(storeStateInit) + } + + // Count errors + mc.performanceReporter.IncrementCounter("ChunkUpdateRetries", 1) + + // Metrics with too many update errors go into Error state + metric.retryCount++ + if e, hasStatusCode := resp.Error.(v3ioerrors.ErrorWithStatusCode); hasStatusCode && e.StatusCode() != http.StatusServiceUnavailable { + // If condition was evaluated as false log this and report this error upstream. + if utils.IsFalseConditionError(resp.Error) { + req := reqInput.(*v3io.UpdateItemInput) + // This might happen on attempt to add metric value of wrong type, i.e. 
float <-> string + errMsg := fmt.Sprintf("failed to ingest values of incompatible data type into metric %s.", req.Path) + mc.logger.DebugWith(errMsg) + setError(mc, metric, errors.New(errMsg)) + } else { + mc.logger.ErrorWith(fmt.Sprintf("Chunk update failed with status code %d.", e.StatusCode())) + setError(mc, metric, errors.Wrap(resp.Error, fmt.Sprintf("Chunk update failed due to status code %d.", e.StatusCode()))) + } + clear() + return false + } else if metric.retryCount == maxRetriesOnWrite { + mc.logger.ErrorWith(fmt.Sprintf("Chunk update failed - exceeded %d retries", maxRetriesOnWrite), "metric", metric.Lset) + setError(mc, metric, errors.Wrap(resp.Error, fmt.Sprintf("Chunk update failed after %d retries.", maxRetriesOnWrite))) + clear() + + // Count errors + mc.performanceReporter.IncrementCounter("ChunkUpdateRetryExceededError", 1) + return false + } + } + } + + metric.setState(storeStateReady) + + var sent bool + var err error + + if canWrite { + sent, err = metric.store.writeChunks(mc, metric) + if err != nil { + // Count errors + mc.performanceReporter.IncrementCounter("WriteChunksError", 1) + + mc.logger.ErrorWith("Submit failed", "metric", metric.Lset, "err", err) + setError(mc, metric, errors.Wrap(err, "Chunk write submit failed.")) + } else if sent { + metric.setState(storeStateUpdate) + mc.updatesInFlight++ + } + + } else if metric.store.samplesQueueLength() > 0 { + mc.metricQueue.Push(metric) + metric.setState(storeStateUpdate) + } + + return sent +} + +// Handle responses for names table updates +func (mc *MetricsCache) nameUpdateRespLoop() { + + go func() { + for { + select { + case _ = <-mc.stopChan: + return + case resp := <-mc.nameUpdateChan: + // Handle V3IO PutItem in names table + metric, ok := resp.Context.(*MetricState) + if ok { + metric.Lock() + if resp.Error != nil { + // Count errors + mc.performanceReporter.IncrementCounter("UpdateNameError", 1) + + mc.logger.ErrorWith("Update-name process failed", "id", resp.ID, "name", metric.name) + } else { + mc.logger.DebugWith("Update-name process response", "id", resp.ID, "name", metric.name) + } + metric.Unlock() + } + + resp.Release() + } + } + }() +} + +func setError(mc *MetricsCache, metric *MetricState, err error) { + metric.setError(err) + mc.lastError = err +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go new file mode 100644 index 00000000..d97cab5e --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go @@ -0,0 +1,508 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. 
+*/ + +package appender + +import ( + "encoding/base64" + "fmt" + "path/filepath" + "sort" + "time" + + "github.com/nuclio/logger" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-tsdb/internal/pkg/performance" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/partmgr" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +// TODO: make it configurable +const maxLateArrivalInterval = 59 * 60 * 1000 // Max late arrival of 59min + +// Create a chunk store with two chunks (current, previous) +func newChunkStore(logger logger.Logger, labelNames []string, aggrsOnly bool) *chunkStore { + store := chunkStore{ + logger: logger, + lastTid: -1, + } + if !aggrsOnly { + store.chunks[0] = &attrAppender{} + store.chunks[1] = &attrAppender{} + } + store.labelNames = labelNames + store.performanceReporter, _ = performance.DefaultReporterInstance() + return &store +} + +// chunkStore store state & latest + previous chunk appenders +type chunkStore struct { + logger logger.Logger + performanceReporter *performance.MetricReporter + + curChunk int + nextTid int64 + lastTid int64 + chunks [2]*attrAppender + + labelNames []string + aggrList *aggregate.AggregatesList + pending pendingList + maxTime int64 + delRawSamples bool // TODO: for metrics w aggregates only +} + +func (cs *chunkStore) isAggr() bool { + return cs.chunks[0] == nil +} + +func (cs *chunkStore) samplesQueueLength() int { + return len(cs.pending) +} + +// Chunk appender object, state used for appending t/v to a chunk +type attrAppender struct { + state chunkState + appender chunkenc.Appender + partition *partmgr.DBPartition + chunkMint int64 +} + +type chunkState uint8 + +const ( + chunkStateFirst chunkState = 1 + chunkStateMerge chunkState = 2 + chunkStateCommitted chunkState = 4 + chunkStateWriting chunkState = 8 +) + +// Initialize/clear the chunk appender +func (a *attrAppender) initialize(partition *partmgr.DBPartition, t int64) { + a.state = 0 + a.partition = partition + a.chunkMint = partition.GetChunkMint(t) +} + +// Check whether the specified time (t) is within the chunk range +func (a *attrAppender) inRange(t int64) bool { + return a.partition.InChunkRange(a.chunkMint, t) +} + +// Check whether the specified time (t) is ahead of the chunk range +func (a *attrAppender) isAhead(t int64) bool { + return a.partition.IsAheadOfChunk(a.chunkMint, t) +} + +// Append a single t/v pair to a chunk +func (a *attrAppender) appendAttr(t int64, v interface{}) { + a.appender.Append(t, v) +} + +// struct/list storing uncommitted samples, with time sorting support +type pendingData struct { + t int64 + v interface{} +} + +type pendingList []pendingData + +func (l pendingList) Len() int { return len(l) } +func (l pendingList) Less(i, j int) bool { return l[i].t < l[j].t } +func (l pendingList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } + +// Read (async) the current chunk state and data from the storage, used in the first chunk access +func (cs *chunkStore) getChunksState(mc *MetricsCache, metric *MetricState) (bool, error) { + + if len(cs.pending) == 0 { + return false, nil + } + // Init chunk and create an aggregates-list object based on the partition policy + t := cs.pending[0].t + part, err := mc.partitionMngr.TimeToPart(t) + if err != nil { + return false, err + } + if !cs.isAggr() { + cs.chunks[0].initialize(part, t) + } + cs.nextTid = t + cs.aggrList = aggregate.NewAggregatesList(part.AggrType()) + + // TODO: if policy to merge w 
old chunks needs to get prev chunk, vs restart appender + + // Issue a GetItem command to the DB to load last state of metric + path := part.GetMetricPath(metric.name, metric.hash, cs.labelNames, cs.isAggr()) + getInput := v3io.GetItemInput{ + Path: path, AttributeNames: []string{config.MaxTimeAttrName}} + + request, err := mc.container.GetItem(&getInput, metric, mc.responseChan) + if err != nil { + mc.logger.ErrorWith("Failed to send a GetItem request to the TSDB", "metric", metric.key, "err", err) + return false, err + } + + mc.logger.DebugWith("Get metric state", "name", metric.name, "key", metric.key, "reqid", request.ID) + return true, nil +} + +// Process the GetItem response from the DB and initialize or restore the current chunk +func (cs *chunkStore) processGetResp(mc *MetricsCache, metric *MetricState, resp *v3io.Response) { + + if !cs.isAggr() { + // TODO: init based on schema, use init function, recover old state vs append based on policy + chunk := chunkenc.NewChunk(cs.logger, metric.isVariant) + app, _ := chunk.Appender() + cs.chunks[0].appender = app + cs.chunks[0].state |= chunkStateFirst + } + + latencyNano := time.Now().UnixNano() - resp.Request().SendTimeNanoseconds + cs.performanceReporter.UpdateHistogram("UpdateMetricLatencyHistogram", latencyNano) + + if resp.Error != nil { + if utils.IsNotExistsError(resp.Error) { + if metric.newName { + path := filepath.Join(mc.cfg.TablePath, config.NamesDirectory, metric.name) + putInput := v3io.PutItemInput{Path: path, Attributes: map[string]interface{}{}} + + request, err := mc.container.PutItem(&putInput, metric, mc.nameUpdateChan) + if err != nil { + cs.performanceReporter.IncrementCounter("PutNameError", 1) + mc.logger.ErrorWith("Update-name PutItem failed", "metric", metric.key, "err", err) + } else { + mc.logger.DebugWith("Update name", "name", metric.name, "key", metric.key, "reqid", request.ID) + } + } + } else { + mc.logger.Error("Update metric has failed with error: %v", resp.Error) + cs.performanceReporter.IncrementCounter("UpdateMetricError", 1) + } + + return + } + + // Check and update the metric item's end time (maxt) timestamp, allow continuing from the last point in case of failure + item := resp.Output.(*v3io.GetItemOutput).Item + var maxTime int64 + val := item[config.MaxTimeAttrName] + if val != nil { + maxTime = int64(val.(int)) + } + mc.logger.DebugWith("Got metric item", "name", metric.name, "key", metric.key, "maxt", maxTime) + + if !mc.cfg.OverrideOld { + cs.maxTime = maxTime + } + + if !cs.isAggr() { + if cs.chunks[0].inRange(maxTime) && !mc.cfg.OverrideOld { + cs.chunks[0].state |= chunkStateMerge + } + } + // Set Last TableId - indicate that there is no need to create metric object + cs.lastTid = cs.nextTid +} + +// Append data to the right chunk and table based on the time and state +func (cs *chunkStore) Append(t int64, v interface{}) { + if metricReporter, err := performance.DefaultReporterInstance(); err == nil { + metricReporter.IncrementCounter("AppendCounter", 1) + } + + cs.pending = append(cs.pending, pendingData{t: t, v: v}) + // If the new time is older than previous times, sort the list + if len(cs.pending) > 1 && cs.pending[len(cs.pending)-2].t > t { + sort.Sort(cs.pending) + } +} + +// Return current, previous, or create new chunk based on sample time +func (cs *chunkStore) chunkByTime(t int64, isVariantEncoding bool) (*attrAppender, error) { + + // Sample is in the current chunk + cur := cs.chunks[cs.curChunk] + if cur.inRange(t) { + return cur, nil + } + + // Sample is in the next chunk, 
need to initialize + if cur.isAhead(t) { + // Time is ahead of this chunk time, advance the current chunk + part := cur.partition + cur = cs.chunks[cs.curChunk^1] + + chunk := chunkenc.NewChunk(cs.logger, isVariantEncoding) // TODO: init based on schema, use init function + app, err := chunk.Appender() + if err != nil { + return nil, err + } + nextPart, err := part.NextPart(t) + if err != nil { + return nil, err + } + cur.initialize(nextPart, t) + cs.nextTid = t + cur.appender = app + cs.curChunk = cs.curChunk ^ 1 + + return cur, nil + } + + // If it's the first chunk after init we don't allow old updates + if (cur.state & chunkStateFirst) != 0 { + return nil, nil + } + + prev := cs.chunks[cs.curChunk^1] + // Delayed appends - only allowed to previous chunk or within allowed window + if prev.partition != nil && prev.inRange(t) && t > cs.maxTime-maxLateArrivalInterval { + return prev, nil + } + + return nil, nil +} + +// Write all pending samples to DB chunks and aggregates +func (cs *chunkStore) writeChunks(mc *MetricsCache, metric *MetricState) (hasPendingUpdates bool, err error) { + cs.performanceReporter.WithTimer("WriteChunksTimer", func() { + // Return if there are no pending updates + if len(cs.pending) == 0 { + hasPendingUpdates, err = false, nil + return + } + + expr := "" + notInitialized := false + + // Init the partition info and find whether we need to init the metric headers (labels, ..) in the case of a new partition + t0 := cs.pending[0].t + partition, err := mc.partitionMngr.TimeToPart(t0) + if err != nil { + hasPendingUpdates = false + return + } + if partition.GetStartTime() > cs.lastTid { + notInitialized = true + cs.lastTid = partition.GetStartTime() + } + + // Init the aggregation-buckets info + bucket := partition.Time2Bucket(t0) + numBuckets := partition.AggrBuckets() + isNewBucket := bucket > partition.Time2Bucket(cs.maxTime) + + var activeChunk *attrAppender + var pendingSampleIndex int + var pendingSamplesCount int + + // Loop over pending samples, add to chunks & aggregates (create required update expressions) + for pendingSampleIndex < len(cs.pending) && pendingSamplesCount < mc.cfg.BatchSize && partition.InRange(cs.pending[pendingSampleIndex].t) { + sampleTime := cs.pending[pendingSampleIndex].t + + if sampleTime <= cs.maxTime && !mc.cfg.OverrideOld { + mc.logger.WarnWith("Omitting the sample - time is earlier than the last sample time for this metric", "metric", metric.Lset, "T", sampleTime) + + // If we have reached the end of the pending events and there are events to update, create an update expression and break from loop, + // Otherwise, discard the event and continue normally + if pendingSampleIndex == len(cs.pending)-1 { + if pendingSamplesCount > 0 { + expr = expr + cs.aggrList.SetOrUpdateExpr("v", bucket, isNewBucket) + expr = expr + cs.appendExpression(activeChunk) + } + pendingSampleIndex++ + break + } else { + pendingSampleIndex++ + continue + } + } + + // Init activeChunk if nil (when samples are too old); if still too + // old, skip to next sample + if !cs.isAggr() && activeChunk == nil { + activeChunk, err = cs.chunkByTime(sampleTime, metric.isVariant) + if err != nil { + hasPendingUpdates = false + return + } + if activeChunk == nil { + pendingSampleIndex++ + mc.logger.DebugWith("nil active chunk", "T", sampleTime) + continue + } + } + + // Advance maximum time processed in metric + if sampleTime > cs.maxTime { + cs.maxTime = sampleTime + } + + // Add a value to the aggregates list + cs.aggrList.Aggregate(sampleTime, 
cs.pending[pendingSampleIndex].v) + + if activeChunk != nil { + // Add a value to the compressed raw-values chunk + activeChunk.appendAttr(sampleTime, cs.pending[pendingSampleIndex].v) + } + + // If this is the last item or last item in the same partition, add + // expressions and break + if (pendingSampleIndex == len(cs.pending)-1) || pendingSamplesCount == mc.cfg.BatchSize-1 || !partition.InRange(cs.pending[pendingSampleIndex+1].t) { + expr = expr + cs.aggrList.SetOrUpdateExpr("v", bucket, isNewBucket) + expr = expr + cs.appendExpression(activeChunk) + pendingSampleIndex++ + pendingSamplesCount++ + break + } + + // If the next item is in new Aggregate bucket, generate an + // expression and initialize the new bucket + nextT := cs.pending[pendingSampleIndex+1].t + nextBucket := partition.Time2Bucket(nextT) + if nextBucket != bucket { + expr = expr + cs.aggrList.SetOrUpdateExpr("v", bucket, isNewBucket) + cs.aggrList.Clear() + bucket = nextBucket + isNewBucket = true + } + + // If the next item is in a new chunk, generate an expression and + // initialize the new chunk + if activeChunk != nil && !activeChunk.inRange(nextT) { + expr = expr + cs.appendExpression(activeChunk) + activeChunk, err = cs.chunkByTime(nextT, metric.isVariant) + if err != nil { + hasPendingUpdates = false + return + } + } + + pendingSampleIndex++ + pendingSamplesCount++ + } + + cs.aggrList.Clear() + if pendingSampleIndex == len(cs.pending) { + cs.pending = cs.pending[:0] + } else { + // Leave pending unprocessed or from newer partitions + cs.pending = cs.pending[pendingSampleIndex:] + } + + if pendingSamplesCount == 0 || expr == "" { + if len(cs.pending) > 0 { + mc.metricQueue.Push(metric) + } + hasPendingUpdates = false + return + } + + // If the table object wasn't initialized, insert an init expression + if notInitialized { + // Initialize label (dimension) attributes + lblexpr := metric.Lset.GetExpr() + + // Initialize aggregate arrays + lblexpr = lblexpr + cs.aggrList.InitExpr("v", numBuckets) + + var encodingExpr string + if !cs.isAggr() { + encodingExpr = fmt.Sprintf("%s='%d'; ", config.EncodingAttrName, activeChunk.appender.Encoding()) + } + lsetExpr := fmt.Sprintf("%s='%s'; ", config.LabelSetAttrName, metric.key) + expr = lblexpr + encodingExpr + lsetExpr + expr + } + + conditionExpr := "" + + // Only add the condition when adding to a data chunk, not when writing data to label pre-aggregated + if activeChunk != nil { + // Call the V3IO async UpdateItem method + conditionExpr = fmt.Sprintf("NOT exists(%s) OR (exists(%s) AND %s == '%d')", + config.EncodingAttrName, config.EncodingAttrName, + config.EncodingAttrName, activeChunk.appender.Encoding()) + } + expr += fmt.Sprintf("%v=%d;", config.MaxTimeAttrName, cs.maxTime) // TODO: use max() expr + path := partition.GetMetricPath(metric.name, metric.hash, cs.labelNames, cs.isAggr()) + request, err := mc.container.UpdateItem( + &v3io.UpdateItemInput{Path: path, Expression: &expr, Condition: conditionExpr}, metric, mc.responseChan) + if err != nil { + mc.logger.ErrorWith("UpdateItem failed", "err", err) + hasPendingUpdates = false + } + + // Add the async request ID to the requests map (can be avoided if V3IO + // will add user data in request) + mc.logger.DebugWith("Update-metric expression", "name", metric.name, "key", metric.key, "expr", expr, "reqid", request.ID) + + hasPendingUpdates = true + cs.performanceReporter.UpdateHistogram("WriteChunksSizeHistogram", int64(pendingSamplesCount)) + return + }) + + return +} + +// Process the (async) response for the 
chunk update request +func (cs *chunkStore) ProcessWriteResp() { + + for _, chunk := range cs.chunks { + // Update the chunk state (if it was written to) + if chunk.state&chunkStateWriting != 0 { + chunk.state |= chunkStateCommitted + chunk.state &^= chunkStateWriting + chunk.appender.Chunk().Clear() + } + } +} + +// Return the chunk's update expression +func (cs *chunkStore) appendExpression(chunk *attrAppender) string { + + if chunk != nil { + bytes := chunk.appender.Chunk().Bytes() + chunk.state |= chunkStateWriting + + expr := "" + idx, err := chunk.partition.TimeToChunkID(chunk.chunkMint) + if err != nil { + return "" + } + attr := chunk.partition.ChunkID2Attr("v", idx) + + val := base64.StdEncoding.EncodeToString(bytes) + + // Overwrite, merge, or append based on the chunk state + if chunk.state&chunkStateCommitted != 0 || chunk.state&chunkStateMerge != 0 { + expr = fmt.Sprintf("%s=if_not_exists(%s,blob('')) + blob('%s'); ", attr, attr, val) + } else { + expr = fmt.Sprintf("%s=blob('%s'); ", attr, val) + } + + return expr + + } + + return "" +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/bstream.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/bstream.go new file mode 100644 index 00000000..5452cc5d --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/bstream.go @@ -0,0 +1,250 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. + +The code in this file was largely written by Damian Gryski as part of +https://github.com/dgryski/go-tsz and published under the license below. +and was later on modified by the Prometheus project in +https://github.com/prometheus/prometheus +Which are licensed under the Apache License, Version 2.0 (the "License"); + +Followed by modifications found here to suit Iguazio needs + +Copyright (c) 2015,2016 Damian Gryski +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +package chunkenc + +import ( + "io" +) + +// bstream is a stream of bits. +type bstream struct { + stream []byte // the data stream + count uint8 // how many bits are valid in current byte +} + +func newBReader(b []byte) *bstream { + return &bstream{stream: b, count: 8} +} + +func newBWriter(size int) *bstream { + return &bstream{stream: make([]byte, 0, size), count: 0} +} + +func (b *bstream) clone() *bstream { + d := make([]byte, len(b.stream)) + copy(d, b.stream) + return &bstream{stream: d, count: b.count} +} + +func (b *bstream) bytes() []byte { + if b.count == 8 { + return b.stream[0 : len(b.stream)-1] + } + return b.stream +} + +type bit bool + +const ( + zero bit = false + one bit = true +) + +func (b *bstream) padToByte() { + if b.count != 8 { + b.count = 0 + } +} + +func (b *bstream) clear() { + b.stream = b.stream[:0] + b.count = 0 +} + +func (b *bstream) writeBit(bit bit) { + if b.count == 0 { + b.stream = append(b.stream, 0) + b.count = 8 + } + + i := len(b.stream) - 1 + + if bit { + b.stream[i] |= 1 << (b.count - 1) + } + + b.count-- +} + +func (b *bstream) writeByte(byt byte) { + if b.count == 0 { + b.stream = append(b.stream, 0) + b.count = 8 + } + + i := len(b.stream) - 1 + + // fill up b.b with b.count bits from byt + b.stream[i] |= byt >> (8 - b.count) + + b.stream = append(b.stream, 0) + i++ + b.stream[i] = byt << b.count +} + +func (b *bstream) writeBits(u uint64, nbits int) { + u <<= (64 - uint(nbits)) + for nbits >= 8 { + byt := byte(u >> 56) + b.writeByte(byt) + u <<= 8 + nbits -= 8 + } + + for nbits > 0 { + b.writeBit((u >> 63) == 1) + u <<= 1 + nbits-- + } +} + +func (b *bstream) readBit() (bit, error) { + if len(b.stream) == 0 { + return false, io.EOF + } + + if b.count == 0 { + b.stream = b.stream[1:] + + if len(b.stream) == 0 { + return false, io.EOF + } + b.count = 8 + } + + d := (b.stream[0] << (8 - b.count)) & 0x80 + b.count-- + return d != 0, nil +} + +func (b *bstream) ReadByte() (byte, error) { + return b.readByte() +} + +// read one byte without moving the cursor +func (b *bstream) PeekByte() byte { + if b.count == 0 { + if len(b.stream) < 1 { + return 0 + } + return b.stream[1] + } + + return b.stream[0] +} + +func (b *bstream) readByte() (byte, error) { + if len(b.stream) == 0 { + return 0, io.EOF + } + + if b.count == 0 { + b.stream = b.stream[1:] + + if len(b.stream) == 0 { + return 0, io.EOF + } + return b.stream[0], nil + } + + if b.count == 8 { + b.count = 0 + return b.stream[0], nil + } + + byt := b.stream[0] << (8 - b.count) + b.stream = b.stream[1:] + + if len(b.stream) == 0 { + return 0, io.EOF + } + + // We just advanced the stream and can assume the shift to be 0. 
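+	// Fill the low (8 - count) bits of the result from the leading bits of the new first byte.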
+ byt |= b.stream[0] >> b.count + + return byt, nil +} + +func (b *bstream) readBits(nbits int) (uint64, error) { + var u uint64 + + for nbits >= 8 { + byt, err := b.readByte() + if err != nil { + return 0, err + } + + u = (u << 8) | uint64(byt) + nbits -= 8 + } + + if nbits == 0 { + return u, nil + } + + if nbits > int(b.count) { + u = (u << uint(b.count)) | uint64((b.stream[0]<<(8-b.count))>>(8-b.count)) + nbits -= int(b.count) + b.stream = b.stream[1:] + + if len(b.stream) == 0 { + return 0, io.EOF + } + b.count = 8 + } + + u = (u << uint(nbits)) | uint64((b.stream[0]<<(8-b.count))>>(8-uint(nbits))) + b.count -= uint8(nbits) + return u, nil +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/chunk.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/chunk.go new file mode 100644 index 00000000..62e0ee3d --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/chunk.go @@ -0,0 +1,111 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. + +The code in this file was largely written by Prometheus Authors as part of +https://github.com/prometheus/prometheus +Copyright 2017 The Prometheus Authors +And is also licensed under the Apache License, Version 2.0; + +And was modified to suit Iguazio needs + +*/ + +package chunkenc + +import ( + "fmt" + + "github.com/nuclio/logger" +) + +// Encoding is the identifier for chunk encoding. +type Encoding uint8 + +func (e Encoding) String() string { + switch e { + case EncNone: + return "none" + case EncXOR: + return "XOR" + case EncVariant: + return "Variant" + } + return "" +} + +// Available chunk encodings +const ( + EncNone Encoding = 0 + EncXOR Encoding = 1 + EncVariant Encoding = 2 +) + +// Chunk holds a sequence of sample pairs that can be iterated over and appended to. +type Chunk interface { + Bytes() []byte + Clear() + Encoding() Encoding + Appender() (Appender, error) + Iterator() Iterator +} + +func NewChunk(logger logger.Logger, variant bool) Chunk { + if variant { + return newVarChunk(logger) + } + return newXORChunk(logger) +} + +// FromData returns a chunk from a byte slice of chunk data. +func FromData(logger logger.Logger, e Encoding, d []byte, samples uint16) (Chunk, error) { + switch e { + case EncXOR: + return &XORChunk{logger: logger, b: &bstream{count: 0, stream: d}, samples: samples}, nil + case EncVariant: + return &VarChunk{logger: logger, b: d, samples: samples}, nil + } + return nil, fmt.Errorf("Unknown chunk encoding: %d", e) +} + +// Appender adds metric-sample pairs to a chunk. +type Appender interface { + Append(int64, interface{}) + Chunk() Chunk + Encoding() Encoding +} + +// Iterator is a simple iterator that can only get the next value. 
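+// At returns the current sample as a (timestamp, float) pair, AtString returns it as a
+// (timestamp, string) pair for variant-encoded chunks, Next advances to the next sample,
+// and Err reports any error encountered while iterating.
+//
+// Illustrative usage sketch (not part of the original code):
+//
+//	it := chunk.Iterator()
+//	for it.Next() {
+//		t, v := it.At()
+//		fmt.Println(t, v)
+//	}
+//	if err := it.Err(); err != nil {
+//		// handle the decoding error
+//	}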
+type Iterator interface { + At() (int64, float64) + AtString() (int64, string) + Err() error + Next() bool +} + +// NewNopIterator returns a new chunk iterator that doesn't hold any data. +func NewNopIterator() Iterator { + return nopIterator{} +} + +type nopIterator struct{} + +func (nopIterator) At() (int64, float64) { return 0, 0 } +func (nopIterator) AtString() (int64, string) { return 0, "" } +func (nopIterator) Next() bool { return false } +func (nopIterator) Err() error { return nil } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/chunkenc_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/chunkenc_test.go new file mode 100644 index 00000000..9b269fa8 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/chunkenc_test.go @@ -0,0 +1,158 @@ +// +build unit + +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package chunkenc + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/nuclio/zap" + "github.com/stretchr/testify/assert" +) + +const basetime = 1524690488000 + +type sample struct { + t int64 + v float64 +} + +// [132 180 199 187 191 88 63 240 - 0 0 0 0 0 0 154 8 - 194 95 255 108 7 126 113 172 - 46 18 195 104 59 202 237 129 - 119 243 146] + +func TestXor(tst *testing.T) { + tst.Skip("Needs to be refactored - Doesn't test anything") + + samples := GenSamples(1000, 5, 1000, 100) + //samples := RealSample(1000) + var byteArray []byte + + logger, err := nucliozap.NewNuclioZapTest("test") + assert.Nil(tst, err) + + ch := newXORChunk(logger) + appender, err := ch.Appender() + if err != nil { + tst.Fatal(err) + } + + for i, s := range samples { + fmt.Println("t,v: ", s.t, s.v) + appender.Append(s.t, s.v) + b := ch.Bytes() + fmt.Println(b, len(b)) + byteArray = append(byteArray, b...) 
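+		// Clear the chunk after copying its bytes so the next Bytes() call returns only newly
+		// appended data (the same pattern store.go uses after a successful chunk write).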
+ ch.Clear() + if i == 4 { + fmt.Println("restarted appender") + ch = newXORChunk(logger) + appender, err = ch.Appender() + if err != nil { + tst.Fatal(err) + } + + } + } + + fmt.Println("Samples:", len(samples), "byteArray:", byteArray, len(byteArray)) + + ch2, err := FromData(logger, EncXOR, byteArray, 0) + if err != nil { + tst.Fatal(err) + } + + iter := ch2.Iterator() + i := 0 + for iter.Next() { + + if iter.Err() != nil { + tst.Fatal(iter.Err()) + } + + t, v := iter.At() + isMatch := t == samples[i].t && v == samples[i].v + fmt.Println("t, v, match: ", t, v, isMatch) + if !isMatch { + tst.Fatalf("iterator t or v doesnt match appended index %d len %d", i, len(samples)) + } + i++ + } + fmt.Println() + + if i != len(samples) { + tst.Fatalf("number of iterator samples (%d) != num of appended (%d)", i, len(samples)) + } + +} + +func TestBstream(t *testing.T) { + t.Skip("Needs to be refactored - Doesn't test anything") + + src := &bstream{count: 8, stream: []byte{0x55, 0x44, 0x33}} + + bs := newBWriter(8) + byt, _ := src.readByte() + bs.writeByte(byt) + fmt.Println(bs.count, bs.stream) + for i := 1; i < 18; i++ { + bit, _ := src.readBit() + fmt.Println(bs.count, bs.stream, bit) + bs.writeBit(bit) + } + + fmt.Println("Reading:") + bs2 := &bstream{count: 8, stream: bs.stream} + fmt.Println(bs2.count, bs2.stream) + for i := 1; i < 18; i++ { + bit, _ := bs2.readBit() + fmt.Println(bs2.count, bs2.stream, bit) + } + +} + +func GenSamples(num, interval int, start, step float64) []sample { + samples := []sample{} + curTime := int64(basetime) + v := start + + for i := 0; i <= num; i++ { + curTime += int64(interval * 1000) + t := curTime + int64(rand.Intn(100)) - 50 + v += float64(rand.Intn(100)-50) / 100 * step + //fmt.Printf("t-%d,v%.2f ", t, v) + samples = append(samples, sample{t: t, v: v}) + } + + return samples +} + +var timeList = []int64{1360281600000, 1360540800000, 1360627200000, 1360713600000, 1360800000000, 1360886400000, 1361232000000, 1361318400000, 1361404800000, 1361491200000, 1361750400000, 1361836800000, 1361923200000, 1362009600000, 1362096000000, 1362355200000, 1362441600000, 1362528000000, 1362614400000, 1362700800000, 1362960000000, 1363046400000, 1363132800000, 1363219200000, 1363305600000, 1363564800000, 1363651200000, 1363737600000, 1363824000000, 1363910400000, 1364169600000, 1364256000000, 1364342400000, 1364428800000, 1364774400000, 1364860800000, 1364947200000, 1365033600000, 1365120000000, 1365379200000, 1365465600000, 1365552000000, 1365638400000, 1365724800000, 1365984000000, 1366070400000, 1366156800000, 1366243200000, 1366329600000, 1366588800000, 1366675200000, 1366761600000, 1366848000000, 1366934400000, 1367193600000, 1367280000000, 1367366400000, 1367452800000, 1367539200000, 1367798400000, 1367884800000, 1367971200000, 1368057600000, 1368144000000, 1368403200000, 1368489600000, 1368576000000, 1368662400000, 1368748800000, 1369008000000, 1369094400000, 1369180800000, 1369267200000, 1369353600000, 1369699200000, 1369785600000, 1369872000000, 1369958400000, 1370217600000, 1370304000000, 1370390400000, 1370476800000, 1370563200000, 1370822400000, 1370908800000, 1370995200000, 1371081600000, 1371168000000, 1371427200000, 1371513600000, 1371600000000, 1371686400000, 1371772800000, 1372032000000, 1372118400000, 1372204800000, 1372291200000, 1372377600000, 1372636800000, 1372723200000, 1372809600000, 1372982400000, 1373241600000, 1373328000000, 1373414400000, 1373500800000, 1373587200000, 1373846400000, 1373932800000, 1374019200000, 1374105600000, 1374192000000, 
1374451200000, 1374537600000, 1374624000000, 1374710400000, 1374796800000, 1375056000000, 1375142400000, 1375228800000, 1375315200000, 1375401600000, 1375660800000, 1375747200000, 1375833600000, 1375920000000, 1376006400000, 1376265600000, 1376352000000, 1376438400000, 1376524800000, 1376611200000, 1376870400000, 1376956800000, 1377043200000, 1377129600000, 1377216000000, 1377475200000, 1377561600000, 1377648000000, 1377734400000, 1377820800000, 1378166400000, 1378252800000, 1378339200000, 1378425600000, 1378684800000, 1378771200000, 1378857600000, 1378944000000, 1379030400000, 1379289600000, 1379376000000, 1379462400000, 1379548800000, 1379635200000, 1379894400000, 1379980800000, 1380067200000, 1380153600000, 1380240000000, 1380499200000, 1380585600000, 1380672000000, 1380758400000, 1380844800000, 1381104000000, 1381190400000, 1381276800000, 1381363200000, 1381449600000, 1381708800000, 1381795200000, 1381881600000, 1381968000000, 1382054400000, 1382313600000, 1382400000000, 1382486400000, 1382572800000, 1382659200000, 1382918400000, 1383004800000, 1383091200000, 1383177600000, 1383264000000, 1383523200000, 1383609600000, 1383696000000, 1383782400000, 1383868800000, 1384128000000, 1384214400000, 1384300800000, 1384387200000, 1384473600000, 1384732800000, 1384819200000, 1384905600000, 1384992000000, 1385078400000, 1385337600000, 1385424000000, 1385510400000, 1385683200000, 1385942400000, 1386028800000, 1386115200000, 1386201600000, 1386288000000, 1386547200000, 1386633600000, 1386720000000, 1386806400000, 1386892800000, 1387152000000, 1387238400000, 1387324800000, 1387411200000, 1387497600000, 1387756800000, 1387843200000, 1388016000000, 1388102400000, 1388361600000, 1388448000000, 1388620800000, 1388707200000, 1388966400000, 1389052800000, 1389139200000, 1389225600000, 1389312000000, 1389571200000, 1389657600000, 1389744000000, 1389830400000, 1389916800000, 1390262400000, 1390348800000, 1390435200000, 1390521600000, 1390780800000, 1390867200000, 1390953600000, 1391040000000, 1391126400000, 1391385600000, 1391472000000, 1391558400000, 1391644800000, 1391731200000, 1391990400000, 1392076800000, 1392163200000, 1392249600000, 1392336000000, 1392681600000, 1392768000000, 1392854400000, 1392940800000, 1393200000000, 1393286400000, 1393372800000, 1393459200000, 1393545600000, 1393804800000, 1393891200000, 1393977600000, 1394064000000, 1394150400000, 1394409600000, 1394496000000, 1394582400000, 1394668800000, 1394755200000, 1395014400000, 1395100800000, 1395187200000, 1395273600000, 1395360000000, 1395619200000, 1395705600000, 1395792000000, 1395878400000, 1395964800000, 1396224000000, 1396310400000, 1396396800000, 1396483200000, 1396569600000, 1396828800000, 1396915200000, 1397001600000, 1397088000000, 1397174400000, 1397433600000, 1397520000000, 1397606400000, 1397692800000, 1398038400000, 1398124800000, 1398211200000, 1398297600000, 1398384000000, 1398643200000, 1398729600000, 1398816000000, 1398902400000, 1398988800000, 1399248000000, 1399334400000, 1399420800000, 1399507200000, 1399593600000, 1399852800000, 1399939200000, 1400025600000, 1400112000000, 1400198400000, 1400457600000, 1400544000000, 1400630400000, 1400716800000, 1400803200000, 1401148800000, 1401235200000, 1401321600000, 1401408000000, 1401667200000, 1401753600000, 1401840000000, 1401926400000, 1402012800000, 1402272000000, 1402358400000, 1402444800000, 1402531200000, 1402617600000, 1402876800000, 1402963200000, 1403049600000, 1403136000000, 1403222400000, 1403481600000, 1403568000000, 1403654400000, 1403740800000, 1403827200000, 
1404086400000, 1404172800000, 1404259200000, 1404345600000, 1404691200000, 1404777600000, 1404864000000, 1404950400000, 1405036800000, 1405296000000, 1405382400000, 1405468800000, 1405555200000, 1405641600000, 1405900800000, 1405987200000, 1406073600000, 1406160000000, 1406246400000, 1406505600000, 1406592000000, 1406678400000, 1406764800000, 1406851200000, 1407110400000, 1407196800000, 1407283200000, 1407369600000, 1407456000000, 1407715200000, 1407801600000, 1407888000000, 1407974400000, 1408060800000, 1408320000000, 1408406400000, 1408492800000, 1408579200000, 1408665600000, 1408924800000, 1409011200000, 1409097600000, 1409184000000, 1409270400000, 1409616000000, 1409702400000, 1409788800000, 1409875200000, 1410134400000, 1410220800000, 1410307200000, 1410393600000, 1410480000000, 1410739200000, 1410825600000, 1410912000000, 1410998400000, 1411084800000, 1411344000000, 1411430400000, 1411516800000, 1411603200000, 1411689600000, 1411948800000, 1412035200000, 1412121600000, 1412208000000, 1412294400000, 1412553600000, 1412640000000, 1412726400000, 1412812800000, 1412899200000, 1413158400000, 1413244800000, 1413331200000, 1413417600000, 1413504000000, 1413763200000, 1413849600000, 1413936000000, 1414022400000, 1414108800000, 1414368000000, 1414454400000, 1414540800000, 1414627200000, 1414713600000, 1414972800000, 1415059200000, 1415145600000, 1415232000000, 1415318400000, 1415577600000, 1415664000000, 1415750400000, 1415836800000, 1415923200000, 1416182400000, 1416268800000, 1416355200000, 1416441600000, 1416528000000, 1416787200000, 1416873600000, 1416960000000, 1417132800000, 1417392000000, 1417478400000, 1417564800000, 1417651200000, 1417737600000, 1417996800000, 1418083200000, 1418169600000, 1418256000000, 1418342400000, 1418601600000, 1418688000000, 1418774400000, 1418860800000, 1418947200000, 1419206400000, 1419292800000, 1419379200000, 1419552000000, 1419811200000, 1419897600000, 1419984000000, 1420156800000, 1420416000000, 1420502400000, 1420588800000, 1420675200000, 1420761600000, 1421020800000, 1421107200000, 1421193600000, 1421280000000, 1421366400000, 1421712000000, 1421798400000, 1421884800000, 1421971200000, 1422230400000, 1422316800000, 1422403200000, 1422489600000, 1422576000000, 1422835200000, 1422921600000, 1423008000000, 1423094400000, 1423180800000, 1423440000000, 1423526400000, 1423612800000, 1423699200000, 1423785600000, 1424131200000, 1424217600000, 1424304000000, 1424390400000, 1424649600000, 1424736000000, 1424822400000, 1424908800000, 1424995200000, 1425254400000, 1425340800000, 1425427200000, 1425513600000, 1425600000000, 1425859200000, 1425945600000, 1426032000000, 1426118400000, 1426204800000, 1426464000000, 1426550400000, 1426636800000, 1426723200000, 1426809600000, 1427068800000, 1427155200000, 1427241600000, 1427328000000, 1427414400000, 1427673600000, 1427760000000, 1427846400000, 1427932800000, 1428278400000, 1428364800000, 1428451200000, 1428537600000, 1428624000000, 1428883200000, 1428969600000, 1429056000000, 1429142400000, 1429228800000, 1429488000000, 1429574400000, 1429660800000, 1429747200000, 1429833600000, 1430092800000, 1430179200000, 1430265600000, 1430352000000, 1430438400000, 1430697600000, 1430784000000, 1430870400000, 1430956800000, 1431043200000, 1431302400000, 1431388800000, 1431475200000, 1431561600000, 1431648000000, 1431907200000, 1431993600000, 1432080000000, 1432166400000, 1432252800000, 1432598400000, 1432684800000, 1432771200000, 1432857600000, 1433116800000, 1433203200000, 1433289600000, 1433376000000, 1433462400000, 1433721600000, 
1433808000000, 1433894400000, 1433980800000, 1434067200000, 1434326400000, 1434412800000, 1434499200000, 1434585600000, 1434672000000, 1434931200000, 1435017600000, 1435104000000, 1435190400000, 1435276800000, 1435536000000, 1435622400000, 1435708800000, 1435795200000, 1436140800000, 1436227200000, 1436313600000, 1436400000000, 1436486400000, 1436745600000, 1436832000000, 1436918400000, 1437004800000, 1437091200000, 1437350400000, 1437436800000, 1437523200000, 1437609600000, 1437696000000, 1437955200000, 1438041600000, 1438128000000, 1438214400000, 1438300800000, 1438560000000, 1438646400000, 1438732800000, 1438819200000, 1438905600000, 1439164800000, 1439251200000, 1439337600000, 1439424000000, 1439510400000, 1439769600000, 1439856000000, 1439942400000, 1440028800000, 1440115200000, 1440374400000, 1440460800000, 1440547200000, 1440633600000, 1440720000000, 1440979200000, 1441065600000, 1441152000000, 1441238400000, 1441324800000, 1441670400000, 1441756800000, 1441843200000, 1441929600000, 1442188800000, 1442275200000, 1442361600000, 1442448000000, 1442534400000, 1442793600000, 1442880000000, 1442966400000, 1443052800000, 1443139200000, 1443398400000, 1443484800000, 1443571200000, 1443657600000, 1443744000000, 1444003200000, 1444089600000, 1444176000000, 1444262400000, 1444348800000, 1444608000000, 1444694400000, 1444780800000, 1444867200000, 1444953600000, 1445212800000, 1445299200000, 1445385600000, 1445472000000, 1445558400000, 1445817600000, 1445904000000, 1445990400000, 1446076800000, 1446163200000, 1446422400000, 1446508800000, 1446595200000, 1446681600000, 1446768000000, 1447027200000, 1447113600000, 1447200000000, 1447286400000, 1447372800000, 1447632000000, 1447718400000, 1447804800000, 1447891200000, 1447977600000, 1448236800000, 1448323200000, 1448409600000, 1448582400000, 1448841600000, 1448928000000, 1449014400000, 1449100800000, 1449187200000, 1449446400000, 1449532800000, 1449619200000, 1449705600000, 1449792000000, 1450051200000, 1450137600000, 1450224000000, 1450310400000, 1450396800000, 1450656000000, 1450742400000, 1450828800000, 1450915200000, 1451260800000, 1451347200000, 1451433600000, 1451520000000, 1451865600000, 1451952000000, 1452038400000, 1452124800000, 1452211200000, 1452470400000, 1452556800000, 1452643200000, 1452729600000, 1452816000000, 1453161600000, 1453248000000, 1453334400000, 1453420800000, 1453680000000, 1453766400000, 1453852800000, 1453939200000, 1454025600000, 1454284800000, 1454371200000, 1454457600000, 1454544000000, 1454630400000, 1454889600000, 1454976000000, 1455062400000, 1455148800000, 1455235200000, 1455580800000, 1455667200000, 1455753600000, 1455840000000, 1456099200000, 1456185600000, 1456272000000, 1456358400000, 1456444800000, 1456704000000, 1456790400000, 1456876800000, 1456963200000, 1457049600000, 1457308800000, 1457395200000, 1457481600000, 1457568000000, 1457654400000, 1457913600000, 1458000000000, 1458086400000, 1458172800000, 1458259200000, 1458518400000, 1458604800000, 1458691200000, 1458777600000, 1459123200000, 1459209600000, 1459296000000, 1459382400000, 1459468800000, 1459728000000, 1459814400000, 1459900800000, 1459987200000, 1460073600000, 1460332800000, 1460419200000, 1460505600000, 1460592000000, 1460678400000, 1460937600000, 1461024000000, 1461110400000, 1461196800000, 1461283200000, 1461542400000, 1461628800000, 1461715200000, 1461801600000, 1461888000000, 1462147200000, 1462233600000, 1462320000000, 1462406400000, 1462492800000, 1462752000000, 1462838400000, 1462924800000, 1463011200000, 1463097600000, 1463356800000, 
1463443200000, 1463529600000, 1463616000000, 1463702400000, 1463961600000, 1464048000000, 1464134400000, 1464220800000, 1464307200000, 1464652800000, 1464739200000, 1464825600000, 1464912000000, 1465171200000, 1465257600000, 1465344000000, 1465430400000, 1465516800000, 1465776000000, 1465862400000, 1465948800000, 1466035200000, 1466121600000, 1466380800000, 1466467200000, 1466553600000, 1466640000000, 1466726400000, 1466985600000, 1467072000000, 1467158400000, 1467244800000, 1467331200000, 1467676800000, 1467763200000, 1467849600000, 1467936000000, 1468195200000, 1468281600000, 1468368000000, 1468454400000, 1468540800000, 1468800000000, 1468886400000, 1468972800000, 1469059200000, 1469145600000, 1469404800000, 1469491200000, 1469577600000, 1469664000000, 1469750400000, 1470009600000, 1470096000000, 1470182400000, 1470268800000, 1470355200000, 1470614400000, 1470700800000, 1470787200000, 1470873600000, 1470960000000, 1471219200000, 1471305600000, 1471392000000, 1471478400000, 1471564800000, 1471824000000, 1471910400000, 1471996800000, 1472083200000, 1472169600000, 1472428800000, 1472515200000, 1472601600000, 1472688000000, 1472774400000, 1473120000000, 1473206400000, 1473292800000, 1473379200000, 1473638400000, 1473724800000, 1473811200000, 1473897600000, 1473984000000, 1474243200000, 1474329600000, 1474416000000, 1474502400000, 1474588800000, 1474848000000, 1474934400000, 1475020800000, 1475107200000, 1475193600000, 1475452800000, 1475539200000, 1475625600000, 1475712000000, 1475798400000, 1476057600000, 1476144000000, 1476230400000, 1476316800000, 1476403200000, 1476662400000, 1476748800000, 1476835200000, 1476921600000, 1477008000000, 1477267200000, 1477353600000, 1477440000000, 1477526400000, 1477612800000, 1477872000000, 1477958400000, 1478044800000, 1478131200000, 1478217600000, 1478476800000, 1478563200000, 1478649600000, 1478736000000, 1478822400000, 1479081600000, 1479168000000, 1479254400000, 1479340800000, 1479427200000, 1479686400000, 1479772800000, 1479859200000, 1480032000000, 1480291200000, 1480377600000, 1480464000000, 1480550400000, 1480636800000, 1480896000000, 1480982400000, 1481068800000, 1481155200000, 1481241600000, 1481500800000, 1481587200000, 1481673600000, 1481760000000, 1481846400000, 1482105600000, 1482192000000, 1482278400000, 1482364800000, 1482451200000, 1482796800000, 1482883200000, 1482969600000, 1483056000000, 1483401600000, 1483488000000, 1483574400000, 1483660800000, 1483920000000, 1484006400000, 1484092800000, 1484179200000, 1484265600000, 1484611200000, 1484697600000, 1484784000000, 1484870400000, 1485129600000, 1485216000000, 1485302400000, 1485388800000, 1485475200000, 1485734400000, 1485820800000, 1485907200000, 1485993600000, 1486080000000, 1486339200000, 1486425600000, 1486512000000, 1486598400000, 1486684800000, 1486944000000, 1487030400000, 1487116800000, 1487203200000, 1487289600000, 1487635200000, 1487721600000, 1487808000000, 1487894400000, 1488153600000, 1488240000000, 1488326400000, 1488412800000, 1488499200000, 1488758400000, 1488844800000, 1488931200000, 1489017600000, 1489104000000, 1489363200000, 1489449600000, 1489536000000, 1489622400000, 1489708800000, 1489968000000, 1490054400000, 1490140800000, 1490227200000, 1490313600000, 1490572800000, 1490659200000, 1490745600000, 1490832000000, 1490918400000, 1491177600000, 1491264000000, 1491350400000, 1491436800000, 1491523200000, 1491782400000, 1491868800000, 1491955200000, 1492041600000, 1492387200000, 1492473600000, 1492560000000, 1492646400000, 1492732800000, 1492992000000, 1493078400000, 
1493164800000, 1493251200000, 1493337600000, 1493596800000, 1493683200000, 1493769600000, 1493856000000, 1493942400000, 1494201600000, 1494288000000, 1494374400000, 1494460800000, 1494547200000, 1494806400000, 1494892800000, 1494979200000, 1495065600000, 1495152000000, 1495411200000, 1495497600000, 1495584000000, 1495670400000, 1495756800000, 1496102400000, 1496188800000, 1496275200000, 1496361600000, 1496620800000, 1496707200000, 1496793600000, 1496880000000, 1496966400000, 1497225600000, 1497312000000, 1497398400000, 1497484800000, 1497571200000, 1497830400000, 1497916800000, 1498003200000, 1498089600000, 1498176000000, 1498435200000, 1498521600000, 1498608000000, 1498694400000, 1498780800000, 1499040000000, 1499212800000, 1499299200000, 1499385600000, 1499644800000, 1499731200000, 1499817600000, 1499904000000, 1499990400000, 1500249600000, 1500336000000, 1500422400000, 1500508800000, 1500595200000, 1500854400000, 1500940800000, 1501027200000, 1501113600000, 1501200000000, 1501459200000, 1501545600000, 1501632000000, 1501718400000, 1501804800000, 1502064000000, 1502150400000, 1502236800000, 1502323200000, 1502409600000, 1502668800000, 1502755200000, 1502841600000, 1502928000000, 1503014400000, 1503273600000, 1503360000000, 1503446400000, 1503532800000, 1503619200000, 1503878400000, 1503964800000, 1504051200000, 1504137600000, 1504224000000, 1504569600000, 1504656000000, 1504742400000, 1504828800000, 1505088000000, 1505174400000, 1505260800000, 1505347200000, 1505433600000, 1505692800000, 1505779200000, 1505865600000, 1505952000000, 1506038400000, 1506297600000, 1506384000000, 1506470400000, 1506556800000, 1506643200000, 1506902400000, 1506988800000, 1507075200000, 1507161600000, 1507248000000, 1507507200000, 1507593600000, 1507680000000, 1507766400000, 1507852800000, 1508112000000, 1508198400000, 1508284800000, 1508371200000, 1508457600000, 1508716800000, 1508803200000, 1508889600000, 1508976000000, 1509062400000, 1509321600000, 1509408000000, 1509494400000, 1509580800000, 1509667200000, 1509926400000, 1510012800000, 1510099200000, 1510185600000, 1510272000000, 1510531200000, 1510617600000, 1510704000000, 1510790400000, 1510876800000, 1511136000000, 1511222400000, 1511308800000, 1511481600000, 1511740800000, 1511827200000, 1511913600000, 1512000000000, 1512086400000, 1512345600000, 1512432000000, 1512518400000, 1512604800000, 1512691200000, 1512950400000, 1513036800000, 1513123200000, 1513209600000, 1513296000000, 1513555200000, 1513641600000, 1513728000000, 1513814400000, 1513900800000, 1514246400000, 1514332800000, 1514419200000, 1514505600000, 1514851200000, 1514937600000, 1515024000000, 1515110400000, 1515369600000, 1515456000000, 1515542400000, 1515628800000, 1515715200000, 1516060800000, 1516147200000, 1516233600000, 1516320000000, 1516579200000, 1516665600000, 1516752000000, 1516838400000, 1516924800000, 1517184000000, 1517270400000, 1517356800000, 1517443200000, 1517529600000, 1517788800000, 1517875200000, 1517961600000} +var valList = []float64{27.260000, 27.405000, 27.370000, 27.370000, 27.610000, 27.400000, 27.290000, 27.815000, 26.810000, 28.230000, 30.130000, 29.455000, 30.370000, 31.250000, 30.900000, 31.550000, 31.865000, 31.310000, 31.250000, 32.485000, 32.295000, 33.000000, 32.560000, 32.925000, 34.020000, 33.115000, 33.940000, 34.165000, 33.750000, 34.000000, 34.135000, 33.495000, 33.630000, 33.850000, 33.485000, 33.720000, 33.265000, 32.475000, 32.120000, 34.215000, 34.340000, 35.375000, 35.365000, 34.680000, 33.525000, 32.277500, 32.225000, 32.310000, 32.895000, 
32.325000, 32.905000, 33.125000, 33.710000, 34.245000, 33.965000, 34.060000, 33.785000, 33.260000, 33.420000, 33.655000, 34.055000, 33.985000, 33.910000, 33.740000, 33.345000, 33.415000, 34.120000, 34.265000, 33.990000, 35.155000, 36.215000, 35.620000, 34.905000, 35.810000, 36.355000, 36.000000, 36.100000, 35.840000, 35.250000, 35.365000, 35.120000, 34.905000, 35.525000, 36.310000, 35.260000, 35.510000, 34.655000, 35.345000, 35.235000, 35.700000, 36.115000, 35.105000, 34.685000, 33.645000, 34.965000, 35.520000, 35.885000, 35.180000, 35.840000, 35.400000, 35.540000, 36.090000, 35.725000, 36.010000, 35.995000, 36.205000, 35.455000, 35.415000, 35.145000, 34.790000, 35.100000, 36.475000, 36.670000, 36.495000, 36.225000, 37.505000, 38.285000, 38.740000, 37.990000, 38.540000, 38.440000, 38.670000, 38.540000, 38.525000, 38.580000, 38.015000, 37.825000, 37.945000, 37.675000, 37.710000, 36.970000, 37.550000, 37.230000, 37.000000, 37.620000, 37.680000, 38.730000, 39.000000, 38.440000, 38.740000, 39.520000, 39.350000, 39.770000, 39.720000, 39.650000, 38.500000, 38.980000, 39.000000, 38.760000, 38.910000, 38.040000, 37.950000, 37.740000, 38.070000, 38.470000, 38.070000, 37.510000, 37.270000, 36.620000, 36.430000, 37.010000, 36.670000, 37.320000, 36.650000, 36.040000, 35.760000, 35.850000, 35.850000, 35.090000, 36.190000, 36.080000, 36.360000, 36.660000, 36.730000, 37.550000, 36.620000, 36.250000, 36.180000, 35.680000, 34.840000, 35.950000, 37.040000, 37.080000, 36.470000, 35.430000, 35.240000, 35.220000, 34.860000, 35.000000, 34.000000, 32.730000, 33.660000, 33.600000, 33.290000, 33.660000, 33.290000, 33.600000, 32.770000, 33.950000, 34.330000, 34.890000, 35.280000, 34.680000, 34.270000, 34.470000, 34.570000, 34.500000, 34.480000, 34.130000, 36.260000, 36.540000, 36.900000, 36.270000, 35.810000, 36.490000, 36.190100, 35.770000, 36.040000, 37.150000, 38.600000, 38.540000, 38.560000, 38.970000, 38.560000, 39.020000, 38.670000, 38.570000, 38.170000, 38.150000, 38.400000, 38.730000, 38.930000, 37.430000, 37.570000, 36.550000, 37.390000, 37.870000, 38.280100, 37.950000, 39.590000, 40.060000, 39.580000, 39.550000, 39.540000, 39.620000, 40.560000, 39.370000, 40.550000, 40.120000, 41.729900, 40.510000, 40.300000, 39.130000, 38.820000, 39.470000, 38.360000, 38.910000, 39.440000, 39.550000, 40.100000, 38.140000, 36.050000, 35.590000, 35.430000, 35.460000, 35.430000, 34.880000, 35.030000, 34.720000, 35.000000, 35.330000, 35.000000, 35.230000, 35.000000, 35.090000, 34.620000, 34.130000, 33.280000, 33.620000, 33.380000, 33.320000, 32.880000, 32.850000, 32.640000, 32.310000, 33.640000, 33.800000, 34.080000, 34.300000, 35.100000, 35.710000, 34.500000, 34.130000, 34.510000, 33.480000, 32.350000, 32.630000, 32.730000, 33.180000, 34.030000, 34.780000, 35.030000, 35.960000, 36.950000, 37.650000, 38.450000, 39.000000, 38.860000, 39.260000, 38.999900, 38.700000, 38.760000, 38.870000, 37.920000, 37.250000, 36.930000, 37.360000, 37.420000, 37.000000, 37.150000, 36.520000, 36.700000, 36.000000, 36.660000, 36.170000, 36.000000, 36.620000, 36.100000, 36.340000, 36.210000, 35.790000, 35.980000, 36.140000, 36.320000, 36.020000, 35.650000, 34.720000, 35.090000, 35.190000, 34.840000, 34.620000, 34.890000, 34.960000, 35.140000, 34.750000, 34.340000, 33.890000, 34.340000, 33.990000, 34.200000, 34.150000, 34.220000, 35.230000, 34.720000, 34.020000, 34.630000, 34.420000, 34.600000, 34.100000, 34.400000, 34.260000, 33.500000, 33.500000, 33.300000, 33.400000, 33.220000, 34.450000, 32.870000, 32.390000, 32.890000, 33.560000, 33.000000, 
32.780000, 32.780000, 34.170000, 33.600000, 33.700000, 33.560000, 34.380000, 34.120000, 33.560000, 33.190000, 33.210000, 33.150000, 33.340000, 32.830000, 33.370000, 33.170000, 33.120000, 33.670000, 33.340000, 32.880000, 33.310000, 33.910000, 33.490000, 33.840000, 33.490000, 33.870000, 33.610000, 33.550000, 33.460000, 33.280000, 32.960000, 33.160000, 33.900000, 33.830000, 33.310000, 32.960000, 32.490000, 30.970000, 31.950000, 31.370000, 31.970000, 32.900000, 32.640000, 31.540000, 31.520000, 31.160000, 31.120000, 30.960000, 31.010000, 29.820000, 29.250000, 28.750000, 27.950000, 29.670000, 31.710000, 30.830000, 31.380000, 31.640000, 30.670000, 32.020000, 30.480000, 30.730000, 31.910000, 31.410000, 30.660000, 31.440000, 30.410000, 31.170000, 31.510000, 32.820000, 33.800000, 32.860000, 33.280000, 32.970000, 32.920000, 33.580000, 33.430000, 33.670000, 33.990000, 35.230000, 34.230000, 34.600000, 33.970000, 32.130000, 32.780000, 32.900000, 31.890000, 31.070000, 31.160000, 30.260000, 29.660000, 29.850000, 29.410000, 29.050000, 30.230000, 29.360000, 30.510000, 32.830000, 32.620000, 30.880000, 30.740000, 30.450000, 30.470000, 30.110000, 29.690000, 29.430000, 29.470000, 29.540000, 28.320000, 28.490000, 28.280000, 28.840000, 29.530000, 29.600000, 29.560000, 30.040000, 28.850000, 28.750000, 29.140000, 29.090000, 28.760000, 28.490000, 28.560000, 27.900000, 27.210000, 26.290000, 26.860000, 27.700000, 27.100000, 26.850000, 26.870000, 26.970000, 27.260000, 27.740000, 28.560000, 28.240000, 27.920000, 27.780000, 26.820000, 27.630000, 27.640000, 27.750000, 28.410000, 29.750000, 29.020000, 29.000000, 29.160000, 29.440000, 28.520000, 28.230000, 27.840000, 27.350000, 26.950000, 28.090000, 27.600000, 27.920000, 28.880000, 27.660000, 28.050000, 28.660000, 28.650000, 28.530000, 29.120000, 29.720000, 28.820000, 28.900000, 28.970000, 29.480000, 29.830000, 30.870000, 30.610000, 30.960000, 30.830000, 31.600000, 31.270000, 30.960000, 31.660000, 32.590000, 32.740000, 32.770000, 32.900000, 32.810000, 32.870000, 33.380000, 32.940000, 32.800000, 33.290000, 33.820000, 33.790000, 34.120000, 34.520000, 34.280000, 34.030000, 34.600000, 35.060000, 35.010000, 35.220000, 35.150000, 35.180000, 35.380000, 35.000000, 34.980000, 35.110000, 34.820000, 34.730000, 34.590000, 34.570000, 33.920000, 33.910000, 34.210000, 34.080000, 34.000000, 33.850000, 33.840000, 34.490000, 34.260000, 34.050000, 33.760000, 33.650000, 33.880000, 34.350000, 34.220000, 33.390000, 33.240000, 33.130000, 32.600000, 32.340000, 31.800000, 31.670000, 31.640000, 31.510000, 31.240000, 30.390000, 30.370000, 30.340000, 30.080000, 29.800000, 29.490000, 30.380000, 29.700000, 29.550000, 29.430000, 28.750000, 28.900000, 28.270000, 27.870000, 25.510000, 25.890000, 26.470000, 26.570000, 26.370000, 25.870000, 25.750000, 25.940000, 25.830000, 25.250000, 25.230000, 25.090000, 25.160000, 26.080000, 26.720000, 27.000000, 26.810000, 26.570000, 26.450000, 25.750000, 24.810000, 23.090000, 22.950000, 21.760000, 22.170000, 23.010000, 23.000000, 22.960000, 23.340000, 23.390000, 23.100000, 23.100000, 23.000000, 22.560000, 22.580000, 22.990000, 22.340000, 22.890000, 23.670000, 23.300000, 23.730000, 23.500000, 24.000000, 23.410000, 24.330000, 23.550000, 22.780000, 21.750000, 22.140000, 21.000000, 22.180000, 22.930000, 23.960000, 23.250000, 23.240000, 23.320000, 22.730000, 22.340000, 22.970000, 23.840000, 23.000000, 23.140000, 23.300000, 22.430000, 21.050000, 20.910000, 20.470000, 21.250000, 21.840000, 21.580000, 21.530000, 22.360000, 22.180000, 22.260000, 22.340000, 22.250000, 22.440000, 
22.590000, 20.490000, 20.290000, 20.770000, 22.370000, 21.120000, 20.260000, 20.410000, 19.640000, 19.510000, 19.020000, 19.110000, 19.190000, 18.780000, 18.320000, 18.070000, 16.840000, 16.540000, 16.240000, 17.590000, 17.220000, 17.240000, 15.910000, 15.860000, 15.160000, 15.430000, 15.290000, 15.700000, 16.160000, 17.010000, 17.680000, 17.400000, 17.810000, 17.200000, 17.090000, 17.670000, 17.660000, 17.020000, 15.710000, 16.780000, 17.080000, 16.320000, 15.710000, 15.850000, 15.470000, 16.530000, 16.050000, 16.950000, 19.360000, 18.940000, 18.810000, 19.270000, 20.400000, 19.680000, 20.240000, 20.170000, 20.820000, 21.070000, 20.790000, 20.310000, 20.420000, 19.490000, 19.270000, 20.210000, 20.090000, 19.410000, 20.530000, 19.350000, 20.440000, 20.330000, 18.920000, 19.710000, 20.130000, 20.350000, 20.390000, 20.860000, 21.890000, 22.520000, 21.440000, 22.020000, 21.860000, 20.680000, 22.410000, 21.570000, 21.870000, 22.600000, 23.250000, 22.890000, 22.640000, 21.800000, 21.630000, 21.020000, 22.000000, 21.670000, 22.880000, 22.740000, 22.260000, 22.610000, 22.420000, 22.350000, 22.300000, 23.710000, 23.490000, 23.130000, 23.780000, 23.140000, 22.600000, 22.390000, 23.700000, 23.400000, 23.460000, 23.000000, 22.880000, 22.410000, 23.080000, 23.790000, 23.710000, 23.330000, 24.030000, 24.290000, 24.370000, 23.790000, 23.650000, 24.140000, 24.370000, 24.800000, 24.190000, 24.110000, 23.910000, 23.780000, 22.810000, 23.380000, 22.890000, 23.430000, 23.570000, 24.230000, 23.690000, 24.020000, 23.820000, 24.360000, 24.790000, 24.650000, 24.610000, 25.700000, 24.850000, 25.560000, 24.920000, 25.020000, 24.630000, 24.260000, 24.660000, 25.040000, 24.980000, 25.520000, 25.390000, 24.530000, 24.530000, 24.560000, 26.280000, 25.330000, 25.810000, 25.260000, 25.280000, 26.160000, 25.850000, 25.500000, 26.000000, 25.960000, 26.140000, 25.450000, 25.080000, 25.370000, 24.690000, 24.500000, 24.410000, 24.200000, 23.410000, 23.770000, 23.600000, 23.560000, 24.580000, 23.770000, 24.050000, 24.370000, 24.460000, 24.740000, 25.050000, 24.330000, 24.280000, 24.720000, 25.200000, 25.330000, 25.100000, 25.120000, 25.950000, 25.710000, 25.830000, 25.940000, 25.620000, 25.510000, 25.310000, 25.320000, 24.700000, 24.440000, 24.690000, 24.390000, 24.800000, 25.500000, 26.220000, 25.920000, 26.020000, 25.110000, 25.150000, 24.710000, 24.520000, 24.500000, 24.790000, 25.550000, 25.720000, 25.540000, 24.820000, 24.590000, 25.420000, 25.580000, 25.890000, 25.600000, 25.540000, 25.750000, 25.550000, 25.570000, 24.030000, 23.550000, 23.710000, 23.040000, 22.330000, 22.160000, 22.220000, 21.090000, 21.890000, 21.250000, 20.940000, 20.370000, 20.920000, 20.700000, 20.460000, 20.990000, 20.270000, 20.300000, 20.140000, 20.450000, 20.330000, 20.310000, 21.690000, 21.320000, 21.310000, 22.160000, 22.250000, 21.880000, 21.780000, 22.360000, 23.140000, 22.370000, 23.040000, 23.180000, 22.300000, 24.440000, 23.060000, 22.720000, 23.200000, 23.870000, 23.740000, 23.830000, 24.640000, 24.170000, 23.480000, 23.310000, 22.520000, 22.650000, 21.850000, 21.940000, 22.190000, 22.360000, 22.400000, 22.890000, 23.240000, 22.800000, 22.810000, 22.980000, 21.930000, 22.400000, 22.740000, 22.800000, 22.410000, 22.280000, 22.300000, 22.210000, 21.850000, 21.530000, 21.730000, 22.720000, 22.320000, 22.350000, 23.000000, 23.080000, 23.080000, 22.320000, 21.180000, 21.640000, 20.660000, 21.280000, 23.750000, 23.730000, 23.340000, 23.870000, 24.170000, 23.900000, 23.860000, 24.140000, 23.920000, 23.010000, 22.930000, 22.840000, 22.950000, 
22.600000, 22.440000, 22.040000, 22.110000, 22.520000, 22.460000, 22.680000, 23.120000, 22.820000, 22.590000, 23.040000, 22.780000, 22.630000, 22.540000, 22.670000, 22.390000, 22.300000, 22.750000, 22.470000, 22.520000, 22.880000, 22.840000, 23.190000, 23.630000, 24.190000, 23.470000, 23.950000, 24.050000, 24.880000, 24.500000, 24.920000, 24.640000, 24.510000, 24.540000, 24.470000, 24.000000, 23.940000, 24.240000, 24.070000, 24.390000, 24.210000, 23.790000, 24.170000, 24.110000, 24.660000, 23.350000, 23.850000, 24.020000, 24.070000, 23.650000, 23.940000, 23.750000, 23.990000, 25.250000, 24.890000, 24.610000, 24.260000, 23.720000, 23.100000, 23.130000, 23.610000, 23.500000, 23.500000, 22.730000, 22.840000, 22.740000, 22.240000, 22.280000, 22.070000, 21.400000, 21.740000, 21.860000, 21.950000, 22.360000, 23.110000, 22.880000, 23.130000, 23.000000, 23.460000, 23.040000, 22.280000, 22.300000, 22.430000, 22.880000, 23.860000, 24.480000, 24.580000, 24.620000, 25.160000, 25.090000, 24.910000, 24.950000, 24.160000, 24.260000, 24.640000, 25.060000, 25.060000, 25.430000, 25.370000, 25.530000, 24.880000, 25.330000, 24.980000, 24.900000, 25.120000, 25.230000, 25.180000, 25.100000, 25.320000, 24.570000, 25.190000, 24.670000, 24.570000, 24.640000, 24.280000, 24.750000, 24.890000, 24.480000, 24.720000, 24.380000, 24.560000, 24.490000, 24.510000, 24.340000, 24.590000, 24.860000, 24.890000, 25.030000, 24.770000, 24.550000, 24.720000, 24.940000, 25.550000, 26.660000, 27.060000, 26.900000, 26.520000, 26.120000, 26.150000, 26.570000, 27.130000, 26.750000, 26.780000, 26.460000, 26.340000, 26.130000, 26.340000, 26.730000, 26.810000, 27.000000, 26.830000, 26.590000, 26.480000, 26.720000, 26.740000, 26.570000, 26.190000, 25.710000, 25.610000, 25.440000, 25.420000, 26.230000, 25.990000, 25.690000, 25.780000, 25.500000, 25.520000, 25.670000, 25.270000, 24.790000, 24.410000, 25.200000, 26.850000, 27.400000, 27.750000, 27.970000, 28.230000, 28.600000, 27.450000, 27.800000, 28.250000, 28.310000, 28.890000, 28.810000, 28.360000, 28.580000, 28.910000, 29.240000, 29.130000, 29.280000, 29.490000, 29.400000, 29.250000, 29.390000, 29.210000, 29.230000, 28.510000, 28.430000, 28.280000, 27.950000, 27.890000, 28.030000, 28.390000, 27.580000, 27.680000, 27.450000, 26.870000, 27.130000, 26.760000, 26.830000, 27.700000, 27.750000, 28.010000, 28.320000, 28.690000, 28.310000, 29.180000, 29.090000, 28.580000, 28.860000, 28.940000, 29.140000, 28.190000, 28.300000, 28.780000, 27.800000, 27.880000, 27.570000, 27.710000, 28.170000, 28.600000, 28.270000, 27.860000, 27.560000, 26.870000, 26.750000, 26.290000, 25.820000, 24.760000, 23.650000, 24.540000} + +func RealSample(num int) []sample { + samples := []sample{} + for i := 0; i < len(timeList) && i < num; i++ { + samples = append(samples, sample{t: timeList[i], v: valList[i]}) + } + return samples +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype.go new file mode 100644 index 00000000..d918dd41 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype.go @@ -0,0 +1,248 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. 
+ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package chunkenc + +import ( + "encoding/binary" + "fmt" + "math" + "strconv" + + "github.com/nuclio/logger" +) + +const ( + varTypeNil byte = 0 + // nolint: deadcode,varcheck + varTypeBlob byte = 1 + varTypeString byte = 2 + // nolint: deadcode,varcheck + varTypeBool byte = 3 + // nolint: deadcode,varcheck + varTypeFloat32 byte = 4 + varTypeFloat64 byte = 5 + // nolint: deadcode,varcheck + varTypeInt8 byte = 8 + // nolint: deadcode,varcheck + varTypeInt16 byte = 9 + // nolint: deadcode,varcheck + varTypeInt32 byte = 10 + // nolint: deadcode,varcheck + varTypeInt64 byte = 11 +) + +const ( + varValueNone byte = 0 + // nolint: deadcode,varcheck + varValueZero byte = 1 + // nolint: deadcode,varcheck + varValueOnes byte = 2 + varValueAny byte = 3 +) + +// Type encoding: 6 bits for var type, 2 bits for predefined type values (e.g. None, zero, NaN, ..) +func decodeType(t byte) (byte, byte) { return t >> 2, t & 3 } +func encodeType(varType, val byte) byte { return varType<<2 + val&3 } + +type VarChunk struct { + logger logger.Logger + + b []byte + samples uint16 + offset int +} + +// NewVarChunk returns a new chunk with variant encoding. +func newVarChunk(logger logger.Logger) Chunk { + return &VarChunk{logger: logger, b: make([]byte, 0, 1024)} +} + +// Encoding returns the encoding type. +func (c *VarChunk) Encoding() Encoding { + return EncVariant +} + +// Bytes returns the underlying byte slice of the chunk. +func (c *VarChunk) Bytes() []byte { + return c.b +} + +func (c *VarChunk) Clear() { + c.b = c.b[:0] +} + +// Appender implements the Chunk interface. +func (c *VarChunk) Appender() (Appender, error) { + a := &varAppender{logger: c.logger, c: c, samples: &c.samples} + return a, nil +} + +// Iterator implements the Chunk interface. 
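A minimal illustrative sketch (separate from the vendored sources; helper names are redeclared locally) of the header layout this variant encoding uses: the type byte keeps the var type in its upper 6 bits and a predefined-value code in its lower 2 bits, and shares an 8-byte little-endian word with a 56-bit timestamp, as the appender and iterator in this file do:

package main

import (
	"encoding/binary"
	"fmt"
)

// Local mirrors of encodeType/decodeType so the sketch is self-contained.
func encType(varType, val byte) byte { return varType<<2 + val&3 }
func decType(t byte) (byte, byte)    { return t >> 2, t & 3 }

func main() {
	const varTypeString, varValueAny byte = 2, 3
	ts := int64(1517961600000) // a millisecond timestamp, well within 56 bits

	// Same layout the appender builds: low 56 bits = time, high 8 bits = encoded type.
	head := uint64(ts)&0x00ffffffffffffff | uint64(encType(varTypeString, varValueAny))<<56

	var b [8]byte
	binary.LittleEndian.PutUint64(b[:], head) // bytes land LSB first, as appendUint64 writes them

	vt, vv := decType(byte(head >> 56))
	fmt.Printf("type=%d valueCode=%d time=%d\n", vt, vv, int64(head&0x00ffffffffffffff))
}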
+func (c *VarChunk) Iterator() Iterator { + return c.iterator() +} + +type varAppender struct { + logger logger.Logger + + c *VarChunk + samples *uint16 + t int64 +} + +func (a *varAppender) Encoding() Encoding { + return a.c.Encoding() +} + +func (a *varAppender) Chunk() Chunk { + return a.c +} + +func (a *varAppender) Append(t int64, v interface{}) { + if v == nil { + a.appendNoValue(t, varTypeNil, varValueNone) + return + } + + switch val := v.(type) { + case string: + a.appendWithValue(t, varTypeString, []byte(val)) + + default: + a.logger.Error("unsupported type %T of value %v\n", v, v) + } +} + +func (a *varAppender) appendNoValue(t int64, varType, varVal byte) { + head := uint64(t) & 0x00ffffffffffffff + head += uint64(encodeType(varType, varVal)) << 56 + appendUint64(&a.c.b, head) + (*a.samples)++ +} + +func appendUint64(b *[]byte, v uint64) { + for i := 0; i < 8; i++ { + *b = append(*b, byte(v)) + v = v >> 8 + } +} + +func (a *varAppender) appendWithUint(t int64, varType byte, val uint64) { + a.appendNoValue(t, varType, varValueAny) + appendUint64(&a.c.b, val) +} + +func (a *varAppender) appendWithValue(t int64, varType byte, val []byte) { + a.appendNoValue(t, varType, varValueAny) + l := uint16(len(val)) + a.c.b = append(a.c.b, byte(l)) + a.c.b = append(a.c.b, byte(l>>8)) + a.c.b = append(a.c.b, val...) +} + +func (c *VarChunk) iterator() *varIterator { + return &varIterator{ + br: c.b, + numTotal: c.samples, + } +} + +type varIterator struct { + br []byte + numTotal uint16 + numRead uint16 + + t int64 + varType byte + varVal byte + val []byte + err error +} + +func (it *varIterator) Next() bool { + if it.err != nil || len(it.br) < 8 { + return false + } + + head := binary.LittleEndian.Uint64(it.br[0:8]) + it.varType, it.varVal = decodeType(byte(head >> 56)) + it.t = int64(head & 0x00ffffffffffffff) + + it.br = it.br[8:] + + if it.varType == varTypeFloat64 && it.varVal == varValueAny { + + if len(it.br) < 8 { + return it.lenError("float64", 8) + } + it.val = it.br[0:8] + it.br = it.br[8:] + } + + if it.varType == varTypeString && it.varVal == varValueAny { + + if len(it.br) < 2 { + return it.lenError("var len", 2) + } + valLen := int(it.br[1])<<8 + int(it.br[0]) + + if len(it.br) < valLen+2 { + return it.lenError("string", valLen) + } + it.val = it.br[2 : valLen+2] + it.br = it.br[valLen+2:] + } + + return true +} + +func (it *varIterator) lenError(v string, expected int) bool { + it.err = fmt.Errorf("chunk decoding error, less than %d bytes to store %s value", expected, v) + return false +} + +func (it *varIterator) At() (int64, float64) { + + if it.varType == varTypeFloat64 { + switch it.varVal { + case varValueNone: + return it.t, math.NaN() + case varValueAny: + v := binary.LittleEndian.Uint64(it.val) + return it.t, math.Float64frombits(v) + } + } + return it.t, 0 +} + +func (it *varIterator) AtString() (int64, string) { + + if it.varType == varTypeFloat64 { + _, val := it.At() + return it.t, strconv.FormatFloat(val, 'f', -1, 64) + } + + return it.t, string(it.val) +} + +func (it *varIterator) Err() error { + return it.err +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype_test.go new file mode 100644 index 00000000..d8fcdef6 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype_test.go @@ -0,0 +1,54 @@ +// +build unit + +package chunkenc + +import ( + "fmt" + "testing" + "time" + + "github.com/nuclio/zap" + 
"github.com/stretchr/testify/suite" +) + +type testVarEncoderSuite struct { + suite.Suite +} + +func (suite *testVarEncoderSuite) TestStringEnc() { + + logger, err := nucliozap.NewNuclioZapTest("test") + suite.Require().Nil(err) + + chunk := newVarChunk(logger) + appender, err := chunk.Appender() + suite.Require().Nil(err) + + list := []string{"abc", "", "123456"} + t0 := time.Now().UnixNano() / 1000 + + for i, s := range list { + t := t0 + int64(i*1000) + appender.Append(t, s) + } + + iterChunk, err := FromData(logger, EncVariant, chunk.Bytes(), 0) + suite.Require().Nil(err) + + iter := iterChunk.Iterator() + i := 0 + for iter.Next() { + t, v := iter.AtString() + suite.Require().Equal(t, t0+int64(i*1000)) + suite.Require().Equal(v, list[i]) + fmt.Println("t, v: ", t, v) + i++ + } + + suite.Require().Nil(iter.Err()) + suite.Require().Equal(i, len(list)) +} + +func TestVarEncoderSuite(t *testing.T) { + suite.Run(t, new(testVarEncoderSuite)) +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/xor.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/xor.go new file mode 100644 index 00000000..bfe9a5e3 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/xor.go @@ -0,0 +1,484 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. + +The code in this file was largely written by Damian Gryski as part of +https://github.com/dgryski/go-tsz and published under the license below. +and was later on modified by the Prometheus project in +https://github.com/prometheus/prometheus +Which are licensed under the Apache License, Version 2.0 (the "License"); + +Followed by modifications found here to suit Iguazio needs + +Copyright (c) 2015,2016 Damian Gryski +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +package chunkenc + +import ( + "math" + "math/bits" + "strconv" + + "github.com/nuclio/logger" +) + +// XORChunk holds XOR encoded sample data. +type XORChunk struct { + logger logger.Logger + + b *bstream + samples uint16 + offset int +} + +// NewXORChunk returns a new chunk with XOR encoding of the given size. +func newXORChunk(logger logger.Logger) Chunk { + //b := make([]byte, 32, 32) + return &XORChunk{logger: logger, b: newBWriter(256)} +} + +// Encoding returns the encoding type. +func (c *XORChunk) Encoding() Encoding { + return EncXOR +} + +// Bytes returns the underlying byte slice of the chunk. +func (c *XORChunk) Bytes() []byte { + //return c.b.getbytes() + return c.b.bytes() +} + +func (c *XORChunk) Clear() { + //c.b.rptr = c.b.getLen() + c.b.clear() +} + +// Appender implements the Chunk interface. +// new implementation, doesnt read the existing buffer, assume its new +func (c *XORChunk) Appender() (Appender, error) { + a := &xorAppender{logger: c.logger, c: c, b: c.b, samples: &c.samples} + if c.samples == 0 { + a.leading = 0xff + } + return a, nil +} + +/* old Appender TODO: do we need to append to existing buffer? maybe in stateless/slow clients +func (c *XORChunk) aAppender() (Appender, error) { + it := c.iterator() + + // To get an appender we must know the state it would have if we had + // appended all existing data from scratch. + // We iterate through the end and populate via the iterator's state. + for it.Next() { + } + if err := it.error(); err != nil { + return nil, err + } + + a := &xorAppender{ + c: c, + b: c.b, + samples: &c.samples, + t: it.t, + v: it.val, + tDelta: it.tDelta, + leading: it.leading, + trailing: it.trailing, + } + if c.samples == 0 { + a.leading = 0xff + } + return a, nil +} +*/ + +func (c *XORChunk) iterator() *xorIterator { + // Should iterators guarantee to act on a copy of the data so it doesn't lock append? + // When using striped locks to guard access to chunks, probably yes. + // Could only copy data if the chunk is not completed yet. + return &xorIterator{ + br: newBReader(c.b.bytes()), // TODO: may need merge + numTotal: c.samples, + } +} + +// Iterator implements the Chunk interface. +func (c *XORChunk) Iterator() Iterator { + return c.iterator() +} + +type xorAppender struct { + logger logger.Logger + + c *XORChunk + b *bstream + samples *uint16 + + t int64 + v float64 + tDelta uint64 + + leading uint8 + trailing uint8 + + isPreviousNewSeries bool +} + +func (a *xorAppender) Encoding() Encoding { + return a.Chunk().Encoding() +} + +func (a *xorAppender) Chunk() Chunk { + return a.c +} + +func (a *xorAppender) Append(t int64, vvar interface{}) { + var tDelta uint64 + num := *a.samples + + var v float64 + switch typedValue := vvar.(type) { + case int: + v = float64(typedValue) + case float64: + v = typedValue + default: + a.logger.Warn("Discarding sample {time: %d, value: %v}, as it's value is of incompatible data type. 
"+ + "Reason: expected 'float' actual '%T'.", t, vvar, vvar) + return + } + + // Do not append if sample is too old. + if t < a.t { + a.logger.Info("Discarding sample from %d, as it is older than the latest sample (%d).", t, a.t) + return + } + + // We write time deltas as 32 bits (for compression) if the delta is too large we'll start a new series + tDelta = uint64(t - a.t) + shouldStartNewSeries := num == 0 || bits.Len64(tDelta) >= 32 + + if shouldStartNewSeries { + // add a signature 11111 to indicate start of cseries in case we put few in the same chunk (append to existing) + a.b.writeBits(0x1f, 5) + a.b.writeBits(uint64(t), 51) + a.b.writeBits(math.Float64bits(v), 64) + a.isPreviousNewSeries = true + tDelta = 0 // saving time delta for the first element is redundant + } else if a.isPreviousNewSeries { + a.b.writeBits(tDelta, 32) + a.writeVDelta(v) + a.isPreviousNewSeries = false + } else { + dod := int64(tDelta - a.tDelta) + + // Gorilla has a max resolution of seconds, Prometheus milliseconds. + // Thus we use higher value range steps with larger bit size. + switch { + case dod == 0: + a.b.writeBit(zero) + case bitRange(dod, 14): + a.b.writeBits(0x02, 2) // '10' + a.b.writeBits(uint64(dod), 14) + case bitRange(dod, 17): + a.b.writeBits(0x06, 3) // '110' + a.b.writeBits(uint64(dod), 17) + case bitRange(dod, 20): + a.b.writeBits(0x0e, 4) // '1110' + a.b.writeBits(uint64(dod), 20) + default: + a.b.writeBits(0x1e, 5) // '11110' + a.b.writeBits(uint64(dod), 32) + } + + a.writeVDelta(v) + + } + + a.t = t + a.v = v + (*a.samples)++ + a.tDelta = tDelta + + a.b.padToByte() +} + +func bitRange(x int64, nbits uint8) bool { + return -((1<<(nbits-1))-1) <= x && x <= 1<<(nbits-1) +} + +func (a *xorAppender) writeVDelta(v float64) { + vDelta := math.Float64bits(v) ^ math.Float64bits(a.v) + + if vDelta == 0 { + a.b.writeBit(zero) + return + } + a.b.writeBit(one) + + leading := uint8(bits.LeadingZeros64(vDelta)) + trailing := uint8(bits.TrailingZeros64(vDelta)) + + // Clamp number of leading zeros to avoid overflow when encoding. + if leading >= 32 { + leading = 31 + } + + if a.leading != 0xff && leading >= a.leading && trailing >= a.trailing { + a.b.writeBit(zero) + a.b.writeBits(vDelta>>a.trailing, 64-int(a.leading)-int(a.trailing)) + } else { + a.leading, a.trailing = leading, trailing + + a.b.writeBit(one) + a.b.writeBits(uint64(leading), 5) + + // Note that if leading == trailing == 0, then sigbits == 64. But that value doesn't actually fit into the 6 bits we have. + // Luckily, we never need to encode 0 significant bits, since that would put us in the other case (vdelta == 0). + // So instead we write out a 0 and adjust it back to 64 on unpacking. 
+ sigbits := 64 - leading - trailing + a.b.writeBits(uint64(sigbits), 6) + a.b.writeBits(vDelta>>trailing, int(sigbits)) + } +} + +type xorIterator struct { + br *bstream + numTotal uint16 + numRead uint16 + + t int64 + val float64 + + leading uint8 + trailing uint8 + + tDelta uint64 + err error +} + +func (it *xorIterator) At() (int64, float64) { + return it.t, it.val +} + +func (it *xorIterator) AtString() (int64, string) { + return it.t, strconv.FormatFloat(it.val, 'f', -1, 64) +} + +func (it *xorIterator) Err() error { + return it.err +} + +func (it *xorIterator) Next() bool { + if it.err != nil || len(it.br.stream) == 0 || (len(it.br.stream) == 1 && it.br.count == 0) { + return false + } + + if it.numRead == 0 { + t, err := it.br.readBits(56) // unlike Gorilla we read a 56bit cropped int (time in year 2000+ has 48bit) + //t, err := binary.ReadVarint(it.br) + if err != nil { + it.err = err + return false + } + t = t & ((0x80 << 40) - 1) + v, err := it.br.readBits(64) + if err != nil { + it.err = err + return false + } + it.t = int64(t) + it.val = math.Float64frombits(v) + + it.numRead++ + return true + } + + // check if this a starting from scratch, signature is 111110xx + isRestart := (it.br.PeekByte() & 0xfc) == 0xf8 + + if it.numRead == 1 && !isRestart { + tDelta, err := it.br.readBits(32) + if err != nil { + it.err = err + return false + } + it.tDelta = tDelta + it.t = it.t + int64(it.tDelta) + + rv := it.readValue() + it.br.padToByte() + + return rv + } + + var d byte + // read delta-of-delta + for i := 0; i < 5; i++ { + d <<= 1 + bit, err := it.br.readBit() + if err != nil { + it.err = err + return false + } + if bit == zero { + break + } + d |= 1 + } + var sz uint8 + var dod int64 + switch d { + case 0x00: + // dod == 0 + case 0x02: + sz = 14 + case 0x06: + sz = 17 + case 0x0e: + sz = 20 + case 0x1e: + bits, err := it.br.readBits(32) + if err != nil { + it.err = err + return false + } + + dod = int64(int32(bits)) + case 0x1f: + // added this case to allow append of a new Gorilla series on an existing chunk (restart from t0) + + t, err := it.br.readBits(51) + //t, err := binary.ReadVarint(it.br) + if err != nil { + it.err = err + return false + } + //t = t & ((0x80 << 40) - 1) + v, err := it.br.readBits(64) + if err != nil { + it.err = err + return false + } + it.t = int64(t) + it.val = math.Float64frombits(v) + + it.numRead = 1 + return true + } + + if sz != 0 { + bits, err := it.br.readBits(int(sz)) + if err != nil { + it.err = err + return false + } + if bits > (1 << (sz - 1)) { + // or something + bits = bits - (1 << sz) + } + dod = int64(bits) + } + + it.tDelta = uint64(int64(it.tDelta) + dod) + it.t = it.t + int64(it.tDelta) + + rv := it.readValue() + it.br.padToByte() + + return rv +} + +func (it *xorIterator) readValue() bool { + bit, err := it.br.readBit() + if err != nil { + it.err = err + return false + } + + if bit == zero { + // it.val = it.val + } else { + bit, err := it.br.readBit() + if err != nil { + it.err = err + return false + } + if bit == zero { + // reuse leading/trailing zero bits + // it.leading, it.trailing = it.leading, it.trailing + } else { + bits, err := it.br.readBits(5) + if err != nil { + it.err = err + return false + } + it.leading = uint8(bits) + + bits, err = it.br.readBits(6) + if err != nil { + it.err = err + return false + } + mbits := uint8(bits) + // 0 significant bits here means we overflowed and we actually need 64; see comment in encoder + if mbits == 0 { + mbits = 64 + } + it.trailing = 64 - it.leading - mbits + } + + mbits := int(64 - 
it.leading - it.trailing) + bits, err := it.br.readBits(mbits) + if err != nil { + it.err = err + return false + } + vbits := math.Float64bits(it.val) + vbits ^= (bits << it.trailing) + it.val = math.Float64frombits(vbits) + } + + it.numRead++ + return true +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go new file mode 100644 index 00000000..494f4e58 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go @@ -0,0 +1,458 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package config + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "strings" + "sync" + + "github.com/ghodss/yaml" + "github.com/imdario/mergo" + "github.com/pkg/errors" +) + +var defaultDisableNginxMitigation = true + +const ( + V3ioConfigEnvironmentVariable = "V3IO_TSDB_CONFIG" + DefaultConfigurationFileName = "v3io-tsdb-config.yaml" + SchemaConfigFileName = ".schema" + + defaultNumberOfIngestWorkers = 1 + defaultNumberOfQueryWorkers = 8 + defaultBatchSize = 64 + defaultTimeoutInSeconds = 24 * 60 * 60 // 24 hours + + defaultMaximumSampleSize = 8 // bytes + defaultMaximumPartitionSize = 1700000 // 1.7MB + defaultMinimumChunkSize = 200 // bytes + defaultMaximumChunkSize = 32000 // bytes + + DefaultShardingBucketsCount = 8 + DefaultStorageClass = "local" + DefaultIngestionRate = "" + DefaultAggregates = "" // no aggregates by default + DefaultAggregationGranularity = "1h" + DefaultLayerRetentionTime = "1y" + DefaultSampleRetentionTime = 0 + DefaultLogLevel = "info" + DefaultVerboseLevel = "debug" + DefaultUseServerAggregateCoefficient = 3 + + // KV attribute names + MaxTimeAttrName = "_maxtime" + LabelSetAttrName = "_lset" + EncodingAttrName = "_enc" + OutOfOrderAttrName = "_ooo" + MetricNameAttrName = "_name" + ObjectNameAttrName = "__name" + ChunkAttrPrefix = "_v" + AggregateAttrPrefix = "_v_" + MtimeSecsAttributeName = "__mtime_secs" + MtimeNSecsAttributeName = "__mtime_nsecs" + + PrometheusMetricNameAttribute = "__name__" + + NamesDirectory = "names" +) + +type BuildInfo struct { + BuildTime string `json:"buildTime,omitempty"` + Os string `json:"os,omitempty"` + Architecture string `json:"architecture,omitempty"` + Version string `json:"version,omitempty"` + CommitHash string `json:"commitHash,omitempty"` + Branch string `json:"branch,omitempty"` +} + +func (bi *BuildInfo) String() string { + return fmt.Sprintf("Build time: %s\nOS: %s\nArchitecture: %s\nVersion: %s\nCommit Hash: %s\nBranch: %s\n", + bi.BuildTime, + bi.Os, + bi.Architecture, + bi.Version, + bi.CommitHash, + bi.Branch) +} + +var ( + // Note, following variables set by make + buildTime, osys, architecture, 
version, commitHash, branch string + + instance *V3ioConfig + once sync.Once + failure error + + BuildMetadta = &BuildInfo{ + BuildTime: buildTime, + Os: osys, + Architecture: architecture, + Version: version, + CommitHash: commitHash, + Branch: branch, + } +) + +func Error() error { + return failure +} + +type V3ioConfig struct { + // V3IO TSDB connection information - web-gateway service endpoint, + // TSDB data container, relative TSDB table path within the container, and + // authentication credentials for the web-gateway service + WebAPIEndpoint string `json:"webApiEndpoint"` + Container string `json:"container"` + TablePath string `json:"tablePath"` + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + AccessKey string `json:"accessKey,omitempty"` + + HTTPTimeout string `json:"httpTimeout,omitempty"` + + // Disabled = true disables the V3IO TSDB configuration in Prometheus and + // enables the internal Prometheus TSDB instead + Disabled bool `json:"disabled,omitempty"` + // Log level - "debug" | "info" | "warn" | "error" + LogLevel string `json:"logLevel,omitempty"` + // Number of parallel V3IO worker routines + Workers int `json:"workers"` + // Number of parallel V3IO worker routines for queries; + // default = the minimum value between 8 and Workers + QryWorkers int `json:"qryWorkers"` + // Override last chunk; by default, an append from the last point is attempted upon restart + OverrideOld bool `json:"overrideOld"` + // Default timeout duration, in seconds; default = 3,600 seconds (1 hour) + DefaultTimeoutInSeconds int `json:"timeout,omitempty"` + // Size of the samples batch to use during ingestion + BatchSize int `json:"batchSize,omitempty"` + // Maximum sample size, in bytes (for the worst compression scenario) + MaximumSampleSize int `json:"maximumSampleSize,omitempty"` + // Maximum size of a partition object + MaximumPartitionSize int `json:"maximumPartitionSize,omitempty"` + // Minimum chunk size, in bytes (for the best compression scenario) + MinimumChunkSize int `json:"minimumChunkSize,omitempty"` + // Maximum chunk size, in bytes (for the worst compression scenario) + MaximumChunkSize int `json:"maximumChunkSize,omitempty"` + // Number of sharding buckets + ShardingBucketsCount int `json:"shardingBucketsCount,omitempty"` + // Metrics-reporter configuration + MetricsReporter MetricsReporterConfig `json:"performance,omitempty"` + // Don't aggregate from raw chunks, for use when working as a Prometheus + // TSDB library + DisableClientAggr bool `json:"disableClientAggr,omitempty"` + // Build Info + BuildInfo *BuildInfo `json:"buildInfo,omitempty"` + // Override nginx bug + DisableNginxMitigation *bool `json:"disableNginxMitigation,omitempty"` + // explicitly always use client aggregation + UsePreciseAggregations bool `json:"usePreciseAggregations,omitempty"` + // Coefficient to decide whether or not to use server aggregates optimization + // use server aggregations if ` / > UseServerAggregateCoefficient` + UseServerAggregateCoefficient int `json:"useServerAggregateCoefficient,omitempty"` + LoadPartitionsFromSchemaAttr bool `json:"loadPartitionsFromSchemaAttr,omitempty"` + RequestChanLength int `json:"RequestChanLength,omitempty"` +} + +type MetricsReporterConfig struct { + // Report on shutdown (Boolean) + ReportOnShutdown bool `json:"reportOnShutdown,omitempty"` + // Output destination - "stdout" or "stderr" + Output string `json:"output"` + // Report periodically (Boolean) + ReportPeriodically bool 
`json:"reportPeriodically,omitempty"` + // Interval between consequence reports (in seconds) + RepotInterval int `json:"reportInterval"` +} + +type Rollup struct { + Aggregates []string `json:"aggregates"` + AggregationGranularity string `json:"aggregationGranularity"` + // Storage class for the aggregates and sample chunks - "cloud" | "local" + StorageClass string `json:"storageClass"` + // [FUTURE] Sample retention period, in hours. 0 means no need to save samples. + SampleRetention int `json:"sampleRetention"` + // Layer retention time, in months ('m'), days ('d'), or hours ('h'). + // Format: "[0-9]+[hmd]". For example: "3h", "7d", "1m" + LayerRetentionTime string `json:"layerRetentionTime"` +} + +type PreAggregate struct { + Labels []string `json:"labels"` + Granularity string `json:"granularity"` + Aggregates []string `json:"aggregates"` +} + +type TableSchema struct { + Version int `json:"version"` + RollupLayers []Rollup `json:"rollupLayers"` + ShardingBucketsCount int `json:"shardingBucketsCount"` + PartitionerInterval string `json:"partitionerInterval"` + ChunckerInterval string `json:"chunckerInterval"` + PreAggregates []PreAggregate `json:"preAggregates"` +} + +type PartitionSchema struct { + Version int `json:"version"` + Aggregates []string `json:"aggregates"` + AggregationGranularity string `json:"aggregationGranularity"` + StorageClass string `json:"storageClass"` + SampleRetention int `json:"sampleRetention"` + PartitionerInterval string `json:"partitionerInterval"` + ChunckerInterval string `json:"chunckerInterval"` +} + +type Partition struct { + StartTime int64 `json:"startTime"` + SchemaInfo PartitionSchema `json:"schemaInfo"` +} + +type SchemaField struct { + Name string `json:"name"` + Type string `json:"type"` + Nullable bool `json:"nullable"` + Items string `json:"items,omitempty"` +} + +type Schema struct { + TableSchemaInfo TableSchema `json:"tableSchemaInfo"` + PartitionSchemaInfo PartitionSchema `json:"partitionSchemaInfo"` + Partitions []*Partition `json:"partitions"` + Fields []SchemaField `json:"fields"` +} + +type MetricConfig struct { + Rollups string `json:"rollups,omitempty"` + RollupMin int `json:"rollupMin,omitempty"` + DelRawSamples bool `json:"delRawSamples,omitempty"` + // Dimensions to pre aggregate (vertical aggregation) + PreAggragate []string `json:"preAggragate,omitempty"` +} + +// TODO: add alerts config (name, match expr, for, lables, annotations) + +func GetOrDefaultConfig() (*V3ioConfig, error) { + return GetOrLoadFromFile("") +} + +func GetOrLoadFromFile(path string) (*V3ioConfig, error) { + once.Do(func() { + instance, failure = loadConfig(path) + return + }) + + return instance, failure +} + +func GetOrLoadFromData(data []byte) (*V3ioConfig, error) { + once.Do(func() { + instance, failure = loadFromData(data) + return + }) + + return instance, failure +} + +// Update the defaults when using a configuration structure +func GetOrLoadFromStruct(cfg *V3ioConfig) (*V3ioConfig, error) { + once.Do(func() { + initDefaults(cfg) + instance = cfg + return + }) + + return instance, nil +} + +// Eagerly reloads TSDB configuration. Note: not thread-safe +func UpdateConfig(path string) { + instance, failure = loadConfig(path) +} + +// Update the defaults when using an existing configuration structure (custom configuration) +func WithDefaults(cfg *V3ioConfig) *V3ioConfig { + initDefaults(cfg) + return cfg +} + +// Create new configuration structure instance based on given instance. 
+// All matching attributes within result structure will be overwritten with values of newCfg +func (config *V3ioConfig) Merge(newCfg *V3ioConfig) (*V3ioConfig, error) { + resultCfg, err := config.merge(newCfg) + if err != nil { + return nil, err + } + + return resultCfg, nil +} + +func (config V3ioConfig) String() string { + if config.Password != "" { + config.Password = "SANITIZED" + } + if config.AccessKey != "" { + config.AccessKey = "SANITIZED" + } + + sanitizedConfigJSON, err := json.Marshal(&config) + if err == nil { + return string(sanitizedConfigJSON) + } + return fmt.Sprintf("Unable to read config: %v", err) +} + +func (*V3ioConfig) merge(cfg *V3ioConfig) (*V3ioConfig, error) { + mergedCfg := V3ioConfig{} + if err := mergo.Merge(&mergedCfg, cfg, mergo.WithOverride); err != nil { + return nil, errors.Wrap(err, "Unable to merge configurations.") + } + return &mergedCfg, nil +} + +func loadConfig(path string) (*V3ioConfig, error) { + + var resolvedPath string + + if strings.TrimSpace(path) != "" { + resolvedPath = path + } else { + envPath := os.Getenv(V3ioConfigEnvironmentVariable) + if envPath != "" { + resolvedPath = envPath + } + } + + if resolvedPath == "" { + resolvedPath = DefaultConfigurationFileName + } + + var data []byte + if _, err := os.Stat(resolvedPath); err != nil { + if os.IsNotExist(err) { + data = []byte{} + } else { + return nil, errors.Wrap(err, "Failed to read the TSDB configuration.") + } + } else { + data, err = ioutil.ReadFile(resolvedPath) + if err != nil { + return nil, err + } + + if len(data) == 0 { + return nil, errors.Errorf("Configuration file '%s' exists but its content is invalid.", resolvedPath) + } + } + + return loadFromData(data) +} + +func loadFromData(data []byte) (*V3ioConfig, error) { + cfg := V3ioConfig{ + BuildInfo: BuildMetadta, + } + err := yaml.Unmarshal(data, &cfg) + + if err != nil { + return nil, err + } + + initDefaults(&cfg) + + return &cfg, err +} + +func initDefaults(cfg *V3ioConfig) { + if cfg.BuildInfo == nil { + cfg.BuildInfo = BuildMetadta + } + + // Initialize the default number of workers + if cfg.Workers == 0 { + cfg.Workers = defaultNumberOfIngestWorkers + } + + // Initialize the default number of Query workers if not set to Min(8,Workers) + if cfg.QryWorkers == 0 { + if cfg.Workers < defaultNumberOfQueryWorkers { + cfg.QryWorkers = cfg.Workers + } else { + cfg.QryWorkers = defaultNumberOfQueryWorkers + } + } + + // Initialize the default batch size + if cfg.BatchSize <= 0 { + cfg.BatchSize = defaultBatchSize + } + + if cfg.DefaultTimeoutInSeconds == 0 { + cfg.DefaultTimeoutInSeconds = int(defaultTimeoutInSeconds) + } + + if cfg.MaximumChunkSize == 0 { + cfg.MaximumChunkSize = defaultMaximumChunkSize + } + + if cfg.MinimumChunkSize == 0 { + cfg.MinimumChunkSize = defaultMinimumChunkSize + } + + if cfg.MaximumSampleSize == 0 { + cfg.MaximumSampleSize = defaultMaximumSampleSize + } + + if cfg.MaximumPartitionSize == 0 { + cfg.MaximumPartitionSize = defaultMaximumPartitionSize + } + + if cfg.ShardingBucketsCount == 0 { + cfg.ShardingBucketsCount = DefaultShardingBucketsCount + } + + if cfg.UseServerAggregateCoefficient == 0 { + cfg.UseServerAggregateCoefficient = DefaultUseServerAggregateCoefficient + } + + if cfg.DisableNginxMitigation == nil { + cfg.DisableNginxMitigation = &defaultDisableNginxMitigation + } + + if cfg.WebAPIEndpoint == "" { + cfg.WebAPIEndpoint = os.Getenv("V3IO_API") + } + + if cfg.AccessKey == "" { + cfg.AccessKey = os.Getenv("V3IO_ACCESS_KEY") + } + + if cfg.Username == "" { + cfg.Username = 
os.Getenv("V3IO_USERNAME") + } + + if cfg.Password == "" { + cfg.Password = os.Getenv("V3IO_PASSWORD") + } +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/config/config_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/config/config_test.go new file mode 100644 index 00000000..e7836faf --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/config/config_test.go @@ -0,0 +1,30 @@ +// +build unit + +package config + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSanitation(tst *testing.T) { + config := &V3ioConfig{ + AccessKey: "12345", + Username: "moses", + Password: "bla-bla-password", + } + + configAsString := config.String() + + // Name should not be sanitized + assert.Contains(tst, configAsString, "moses") + + // sensitive fields must be sanitized + assert.NotContains(tst, configAsString, "12345") + assert.NotContains(tst, configAsString, "bla-bla-password") + + // original object should not be changed + assert.Equal(tst, config.AccessKey, "12345") + assert.Equal(tst, config.Password, "bla-bla-password") +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/formatter/formatters.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/formatter/formatters.go new file mode 100644 index 00000000..2d50a450 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/formatter/formatters.go @@ -0,0 +1,177 @@ +package formatter + +import ( + "encoding/csv" + "fmt" + "io" + "strconv" + "time" + + "github.com/pkg/errors" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type textFormatter struct { + baseFormatter +} + +func (f textFormatter) Write(out io.Writer, set utils.SeriesSet) error { + + for set.Next() { + series := set.At() + name, lbls := labelsToStr(series.Labels()) + fmt.Fprintf(out, "Name: %s Labels: %s\n", name, lbls) + iter := series.Iterator() + for iter.Next() { + if iter.Encoding() == chunkenc.EncXOR { + t, v := iter.At() + fmt.Fprintf(out, " %s v=%.2f\n", f.timeString(t), v) + } else { + t, v := iter.AtString() + fmt.Fprintf(out, " %s v=%v\n", f.timeString(t), v) + } + } + + if iter.Err() != nil { + return iter.Err() + } + + fmt.Fprintln(out, "") + } + + if set.Err() != nil { + return set.Err() + } + + return nil +} + +func (f textFormatter) timeString(t int64) string { + if f.cfg.TimeFormat == "" { + return strconv.Itoa(int(t)) + } + return time.Unix(t/1000, 0).Format(f.cfg.TimeFormat) +} + +type csvFormatter struct { + baseFormatter +} + +func (f csvFormatter) Write(out io.Writer, set utils.SeriesSet) error { + + writer := csv.NewWriter(out) + for set.Next() { + + series := set.At() + name, labelStr := labelsToStr(series.Labels()) + + iter := series.Iterator() + for iter.Next() { + if iter.Encoding() == chunkenc.EncXOR { + t, v := iter.At() + _ = writer.Write([]string{name, labelStr, fmt.Sprintf("%.6f", v), strconv.FormatInt(t, 10)}) + } else { + t, v := iter.AtString() + _ = writer.Write([]string{name, labelStr, fmt.Sprintf("%v", v), strconv.FormatInt(t, 10)}) + } + } + + if iter.Err() != nil { + return iter.Err() + } + } + + if set.Err() != nil { + return set.Err() + } + + writer.Flush() + return nil + +} + +type simpleJSONFormatter struct { + baseFormatter +} + +const metricTemplate = ` + { "target": "%s{%s}", + "datapoints": [%s] + }` + +func (f simpleJSONFormatter) Write(out io.Writer, set utils.SeriesSet) error { + + firstSeries := true + output := "[" + + for set.Next() { + series := set.At() + name, labelStr := 
labelsToStr(series.Labels()) + datapoints := "" + + iter := series.Iterator() + firstItem := true + for iter.Next() { + + if !firstItem { + datapoints = datapoints + "," + } + if iter.Encoding() == chunkenc.EncXOR { + t, v := iter.At() + datapoints = datapoints + fmt.Sprintf("[%.6f,%d]", v, t) + } else { + t, v := iter.AtString() + datapoints = datapoints + fmt.Sprintf("[\"%v\",%d]", v, t) + } + + firstItem = false + } + + if iter.Err() != nil { + return iter.Err() + } + + if !firstSeries { + output = output + "," + } + output = output + fmt.Sprintf(metricTemplate, name, labelStr, datapoints) + firstSeries = false + } + + if set.Err() != nil { + return set.Err() + } + + _, err := out.Write([]byte(output + "\n]")) + + return err +} + +type testFormatter struct { + baseFormatter +} + +func (f testFormatter) Write(out io.Writer, set utils.SeriesSet) error { + var count int + for set.Next() { + count++ + series := set.At() + iter := series.Iterator() + var i int + for iter.Next() { + i++ + } + + if iter.Err() != nil { + return errors.Errorf("error reading point for label set: %v, at index: %v, error: %v", series.Labels(), i, iter.Err()) + } + } + + if set.Err() != nil { + return set.Err() + } + + fmt.Fprintf(out, "got %v unique label sets\n", count) + return nil +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/formatter/type.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/formatter/type.go new file mode 100644 index 00000000..9c1f0923 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/formatter/type.go @@ -0,0 +1,56 @@ +package formatter + +import ( + "fmt" + "io" + "strings" + "time" + + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +const DefaultOutputFormat = "text" + +func NewFormatter(format string, cfg *Config) (Formatter, error) { + if cfg == nil { + cfg = &Config{TimeFormat: time.RFC3339} + } + switch format { + case "", DefaultOutputFormat: + return textFormatter{baseFormatter{cfg: cfg}}, nil + case "csv": + return csvFormatter{baseFormatter{cfg: cfg}}, nil + case "json": + return simpleJSONFormatter{baseFormatter{cfg: cfg}}, nil + case "none": + return testFormatter{baseFormatter{cfg: cfg}}, nil + + default: + return nil, fmt.Errorf("unknown formatter type %s", format) + } +} + +type Formatter interface { + Write(out io.Writer, set utils.SeriesSet) error +} + +type Config struct { + TimeFormat string +} + +type baseFormatter struct { + cfg *Config +} + +func labelsToStr(labels utils.Labels) (string, string) { + name := "" + var lbls []string + for _, lbl := range labels { + if lbl.Name == "__name__" { + name = lbl.Value + } else { + lbls = append(lbls, lbl.Name+"="+lbl.Value) + } + } + return name, strings.Join(lbls, ",") +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go new file mode 100644 index 00000000..aaf716cc --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go @@ -0,0 +1,753 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. 
+ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package partmgr + +import ( + "encoding/json" + "fmt" + "math" + "path" + "sort" + "strconv" + "strings" + "sync" + + "github.com/pkg/errors" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-tsdb/internal/pkg/performance" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/tsdb/schema" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +const ( + partitionAttributePrefix = "p" +) + +// Create a new partition manager +func NewPartitionMngr(schemaConfig *config.Schema, cont v3io.Container, v3ioConfig *config.V3ioConfig) (*PartitionManager, error) { + currentPartitionInterval, err := utils.Str2duration(schemaConfig.PartitionSchemaInfo.PartitionerInterval) + if err != nil { + return nil, err + } + newMngr := &PartitionManager{schemaConfig: schemaConfig, cyclic: false, container: cont, currentPartitionInterval: currentPartitionInterval, v3ioConfig: v3ioConfig} + err = newMngr.updatePartitionsFromSchema(schemaConfig, nil) + if err != nil { + return nil, err + } + return newMngr, nil +} + +type PartitionManager struct { + mtx sync.RWMutex + schemaConfig *config.Schema + schemaMtimeSecs int + schemaMtimeNanosecs int + headPartition *DBPartition + partitions []*DBPartition + cyclic bool + container v3io.Container + currentPartitionInterval int64 //TODO update on schema changes + v3ioConfig *config.V3ioConfig +} + +func (p *PartitionManager) GetSchemaFilePath() string { + return path.Join(p.Path(), config.SchemaConfigFileName) +} + +func (p *PartitionManager) GetPartitionsTablePath() string { + return path.Join(p.Path(), "partitions") +} + +func (p *PartitionManager) Path() string { + return p.v3ioConfig.TablePath +} + +func (p *PartitionManager) GetPartitionsPaths() []string { + var paths []string + for _, part := range p.partitions { + paths = append(paths, part.GetTablePath()) + } + return paths +} + +func (p *PartitionManager) GetConfig() *config.Schema { + return p.schemaConfig +} + +func (p *PartitionManager) Init() error { + return nil +} + +func (p *PartitionManager) TimeToPart(t int64) (*DBPartition, error) { + if p.headPartition == nil { + // Rounding t to the nearest PartitionInterval multiple + _, err := p.createAndUpdatePartition(p.currentPartitionInterval * (t / p.currentPartitionInterval)) + return p.headPartition, err + } + if t >= p.headPartition.startTime { + if (t - p.headPartition.startTime) >= p.currentPartitionInterval { + _, err := p.createAndUpdatePartition(p.currentPartitionInterval * (t / p.currentPartitionInterval)) + if err != nil { + return nil, err + } + } + return p.headPartition, nil + } + // Iterate backwards; ignore the last element as it's the head partition + for i := len(p.partitions) - 2; i >= 0; i-- { + if t >= p.partitions[i].startTime { + if t < p.partitions[i].GetEndTime() { + return p.partitions[i], nil + } + part, err := p.createAndUpdatePartition(p.currentPartitionInterval * (t / p.currentPartitionInterval)) + if err != nil { + return nil, 
err + } + return part, nil + } + } + head := p.headPartition + part, _ := p.createAndUpdatePartition(p.currentPartitionInterval * (t / p.currentPartitionInterval)) + p.headPartition = head + return part, nil +} + +func (p *PartitionManager) createAndUpdatePartition(t int64) (*DBPartition, error) { + time := t & 0x7FFFFFFFFFFFFFF0 + partPath := path.Join(p.Path(), strconv.FormatInt(time/1000, 10)) + "/" + partition, err := NewDBPartition(p, time, partPath) + if err != nil { + return nil, err + } + p.currentPartitionInterval = partition.partitionInterval + + schemaPartition := &config.Partition{StartTime: partition.startTime, SchemaInfo: p.schemaConfig.PartitionSchemaInfo} + if p.headPartition == nil || time > p.headPartition.startTime { + p.headPartition = partition + p.partitions = append(p.partitions, partition) + p.schemaConfig.Partitions = append(p.schemaConfig.Partitions, schemaPartition) + } else { + for i, part := range p.partitions { + if part.startTime > time { + p.partitions = append(p.partitions, nil) + copy(p.partitions[i+1:], p.partitions[i:]) + p.partitions[i] = partition + + p.schemaConfig.Partitions = append(p.schemaConfig.Partitions, nil) + copy(p.schemaConfig.Partitions[i+1:], p.schemaConfig.Partitions[i:]) + p.schemaConfig.Partitions[i] = schemaPartition + break + } + } + } + + err = p.updateSchema() + return partition, err +} + +func (p *PartitionManager) updateSchema() error { + var outerError error + metricReporter := performance.ReporterInstanceFromConfig(p.v3ioConfig) + metricReporter.WithTimer("UpdateSchemaTimer", func() { + // updating schema version and copying partitions to kv table. + p.schemaConfig.TableSchemaInfo.Version = schema.Version + + data, err := json.Marshal(p.schemaConfig) + if err != nil { + outerError = errors.Wrap(err, "Failed to update a new partition in the schema file.") + return + } + schemaFilePath := p.GetSchemaFilePath() + if p.container != nil { // Tests use case only + err = p.container.PutObjectSync(&v3io.PutObjectInput{Path: schemaFilePath, Body: data}) + if err != nil { + outerError = err + return + } + attributes := make(map[string]interface{}, len(p.partitions)) + for _, part := range p.partitions { + marshalledPartition, err := json.Marshal(part.ToMap()) + if err != nil { + outerError = err + return + } + attributes[part.GetPartitionAttributeName()] = marshalledPartition + } + + input := &v3io.PutItemInput{Path: schemaFilePath, Attributes: attributes} + _, err := p.container.PutItemSync(input) + + if err != nil { + outerError = errors.Wrap(err, "failed to update partitions table.") + return + } + } + }) + + return outerError +} + +func (p *PartitionManager) DeletePartitionsFromSchema(partitionsToDelete []*DBPartition) error { + for i := len(p.partitions) - 1; i >= 0; i-- { + for _, partToDelete := range partitionsToDelete { + if p.partitions[i].startTime == partToDelete.startTime { + p.partitions = append(p.partitions[:i], p.partitions[i+1:]...) + break + } + } + + } + for i := len(p.schemaConfig.Partitions) - 1; i >= 0; i-- { + for _, partToDelete := range partitionsToDelete { + if p.schemaConfig.Partitions[i].StartTime == partToDelete.startTime { + p.schemaConfig.Partitions = append(p.schemaConfig.Partitions[:i], p.schemaConfig.Partitions[i+1:]...) 
+ break + } + } + } + + // Delete from partitions KV table + if p.container != nil { // Tests use case only + deletePartitionExpression := strings.Builder{} + for _, partToDelete := range partitionsToDelete { + deletePartitionExpression.WriteString("delete(") + deletePartitionExpression.WriteString(partToDelete.GetPartitionAttributeName()) + deletePartitionExpression.WriteString(");") + } + expression := deletePartitionExpression.String() + _, err := p.container.UpdateItemSync(&v3io.UpdateItemInput{Path: p.GetSchemaFilePath(), Expression: &expression}) + if err != nil { + return err + } + } + + return p.updateSchema() +} + +func (p *PartitionManager) ReadAndUpdateSchema() (err error) { + metricReporter, err := performance.DefaultReporterInstance() + if err != nil { + err = errors.Wrap(err, "Unable to initialize the performance-metrics reporter.") + return + } + + schemaFilePath := p.GetSchemaFilePath() + if err != nil { + err = errors.Wrap(err, "Failed to create timer ReadAndUpdateSchemaTimer.") + return + } + schemaInfoResp, err := p.container.GetItemSync(&v3io.GetItemInput{Path: schemaFilePath, AttributeNames: []string{"**"}}) + if err != nil { + err = errors.Wrapf(err, "Failed to read schema at path '%s'.", schemaFilePath) + return + } + + schemaGetItemResponse := schemaInfoResp.Output.(*v3io.GetItemOutput) + mtimeSecs, err := schemaGetItemResponse.Item.GetFieldInt("__mtime_secs") + if err != nil { + err = errors.Wrapf(err, "Failed to get start time (mtime) in seconds from the schema at '%s'.", schemaFilePath) + return + } + mtimeNsecs, err := schemaGetItemResponse.Item.GetFieldInt("__mtime_nsecs") + if err != nil { + err = errors.Wrapf(err, "Failed to get start time (mtime) in nanoseconds from the schema at '%s'.", schemaFilePath) + return + } + + // Get schema only if the schema has changed + if mtimeSecs > p.schemaMtimeSecs || (mtimeSecs == p.schemaMtimeSecs && mtimeNsecs > p.schemaMtimeNanosecs) { + p.schemaMtimeSecs = mtimeSecs + p.schemaMtimeNanosecs = mtimeNsecs + + metricReporter.WithTimer("ReadAndUpdateSchemaTimer", func() { + err = p.updatePartitionsFromSchema(nil, schemaGetItemResponse) + return + }) + } + return +} + +func (p *PartitionManager) updatePartitionsFromSchema(schemaConfig *config.Schema, schemaGetItemResponse *v3io.GetItemOutput) error { + var currentSchemaVersion int + if schemaConfig == nil { + currentSchemaVersion = p.schemaConfig.TableSchemaInfo.Version + } else { + currentSchemaVersion = schemaConfig.TableSchemaInfo.Version + } + + if currentSchemaVersion == 4 && p.v3ioConfig.LoadPartitionsFromSchemaAttr { + return p.newLoadPartitions(schemaGetItemResponse) + } + + return p.oldLoadPartitions(schemaConfig) +} + +func (p *PartitionManager) oldLoadPartitions(schema *config.Schema) error { + if schema == nil { + schemaFilePath := p.GetSchemaFilePath() + resp, innerError := p.container.GetObjectSync(&v3io.GetObjectInput{Path: schemaFilePath}) + if innerError != nil { + return errors.Wrapf(innerError, "Failed to read schema at path '%s'.", schemaFilePath) + } + + schema = &config.Schema{} + innerError = json.Unmarshal(resp.Body(), schema) + if innerError != nil { + return errors.Wrapf(innerError, "Failed to unmarshal schema at path '%s'.", schemaFilePath) + } + } + + p.partitions = []*DBPartition{} + for _, part := range schema.Partitions { + partPath := path.Join(p.Path(), strconv.FormatInt(part.StartTime/1000, 10)) + "/" + newPart, err := NewDBPartition(p, part.StartTime, partPath) + if err != nil { + return err + } + p.partitions = append(p.partitions, newPart) 
+ if p.headPartition == nil { + p.headPartition = newPart + } else if p.headPartition.startTime < newPart.startTime { + p.headPartition = newPart + } + } + return nil +} + +func (p *PartitionManager) newLoadPartitions(schemaAttributesResponse *v3io.GetItemOutput) error { + if p.container == nil { // Tests use case only + return nil + } + + if schemaAttributesResponse == nil { + schemaFilePath := p.GetSchemaFilePath() + schemaInfoResp, err := p.container.GetItemSync(&v3io.GetItemInput{Path: schemaFilePath, AttributeNames: []string{"*"}}) + if err != nil { + return errors.Wrapf(err, "Failed to read schema at path '%s'.", schemaFilePath) + } + + schemaAttributesResponse = schemaInfoResp.Output.(*v3io.GetItemOutput) + } + + p.partitions = []*DBPartition{} + for partitionStartTime, partitionAttrBlob := range schemaAttributesResponse.Item { + // Only process "partition" attributes + if !strings.HasPrefix(partitionStartTime, partitionAttributePrefix) { + continue + } + intStartTime, err := strconv.ParseInt(partitionStartTime[1:], 10, 64) + if err != nil { + return errors.Wrapf(err, "invalid partition name '%v'", partitionStartTime) + } + + partPath := path.Join(p.Path(), strconv.FormatInt(intStartTime/1000, 10)) + "/" + + partitionAttr := make(map[string]interface{}, 5) + err = json.Unmarshal(partitionAttrBlob.([]byte), &partitionAttr) + if err != nil { + return err + } + newPart, err := NewDBPartitionFromMap(p, intStartTime, partPath, partitionAttr) + if err != nil { + return err + } + p.partitions = append(p.partitions, newPart) + if p.headPartition == nil { + p.headPartition = newPart + } else if p.headPartition.startTime < newPart.startTime { + p.headPartition = newPart + } + } + + sort.SliceStable(p.partitions, func(i, j int) bool { + return p.partitions[i].startTime < p.partitions[j].startTime + }) + + return nil +} + +// If inclusive is true, then partial partitions (not fully in range) will be retrieved as well +func (p *PartitionManager) PartsForRange(mint, maxt int64, inclusive bool) []*DBPartition { + var parts []*DBPartition + for _, part := range p.partitions { + if (mint < part.GetStartTime() && maxt > part.GetEndTime()) || (inclusive && (part.InRange(mint) || part.InRange(maxt))) { + parts = append(parts, part) + } + } + return parts +} + +type DBPartition struct { + manager *PartitionManager + path string // Full path to the partition within the DB + startTime int64 // Start time + partitionInterval int64 // Number of msecs stored in the partition + chunkInterval int64 // Number of msecs stored in each chunk + prefix string // Path prefix + retentionDays int // Keep samples for N hours + defaultRollups aggregate.AggrType // Default aggregation functions to apply on sample update + rollupTime int64 // Time range per aggregation bucket + rollupBuckets int // Total number of aggregation buckets per partition +} + +// Create and initialize a new partition +func NewDBPartition(pmgr *PartitionManager, startTime int64, path string) (*DBPartition, error) { + rollupTime, err := utils.Str2duration(pmgr.schemaConfig.PartitionSchemaInfo.AggregationGranularity) + if err != nil { + return nil, err + } + partitionInterval, err := utils.Str2duration(pmgr.schemaConfig.PartitionSchemaInfo.PartitionerInterval) + if err != nil { + return nil, err + } + chunkInterval, err := utils.Str2duration(pmgr.schemaConfig.PartitionSchemaInfo.ChunckerInterval) + if err != nil { + return nil, err + } + newPart := DBPartition{ + manager: pmgr, + path: path, + startTime: startTime, + partitionInterval:
partitionInterval, + chunkInterval: chunkInterval, + prefix: "", + retentionDays: pmgr.schemaConfig.PartitionSchemaInfo.SampleRetention, + rollupTime: rollupTime, + } + + aggrType, _, err := aggregate.AggregatesFromStringListWithCount(pmgr.schemaConfig.PartitionSchemaInfo.Aggregates) + if err != nil { + return nil, err + } + newPart.defaultRollups = aggrType + if rollupTime != 0 { + newPart.rollupBuckets = int(math.Ceil(float64(partitionInterval) / float64(rollupTime))) + } + + return &newPart, nil +} + +// Create and initialize a new partition +func NewDBPartitionFromMap(pmgr *PartitionManager, startTime int64, path string, item v3io.Item) (*DBPartition, error) { + rollupTime, err := item.GetFieldInt("rollupTime") + if err != nil { + return nil, fmt.Errorf("failed to parse rollupTime for partition: %v, rollup: %v", startTime, item.GetField("rollupTime")) + } + + partitionInterval, err := item.GetFieldInt("partitionInterval") + if err != nil { + return nil, fmt.Errorf("failed to parse partitionInterval for partition: %v, interval: %v", startTime, item.GetField("partitionInterval")) + } + + chunkInterval, err := item.GetFieldInt("chunkInterval") + if err != nil { + return nil, fmt.Errorf("failed to parse chunk Interval for partition: %v, interval: %v", startTime, item.GetField("chunkInterval")) + } + + retention, err := item.GetFieldInt("retentionDays") + if err != nil { + return nil, errors.Wrapf(err, "failed to parse retention days for partition: %v, retention: %v", startTime, item.GetField("retentionDays")) + } + + stringAggregates, err := item.GetFieldString("aggregates") + if err != nil { + return nil, errors.Wrapf(err, "failed to parse aggregates for partition: %v, aggregates: %v", startTime, item.GetField("aggregates")) + } + mask, _, err := aggregate.AggregatesFromStringListWithCount(strings.Split(stringAggregates, ",")) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse aggregates from string for partition: %v, aggregates: %v", startTime, stringAggregates) + } + + newPart := DBPartition{ + manager: pmgr, + path: path, + startTime: startTime, + partitionInterval: int64(partitionInterval), + chunkInterval: int64(chunkInterval), + prefix: "", + retentionDays: retention, + rollupTime: int64(rollupTime), + defaultRollups: mask, + } + + if rollupTime != 0 { + newPart.rollupBuckets = int(math.Ceil(float64(partitionInterval) / float64(rollupTime))) + } + + return &newPart, nil +} + +func (p *DBPartition) PreAggregates() []config.PreAggregate { + return p.manager.GetConfig().TableSchemaInfo.PreAggregates +} + +func (p *DBPartition) IsCyclic() bool { + return p.manager.cyclic +} + +// Return the time range covered by a single chunk (the chunk interval) +func (p *DBPartition) TimePerChunk() int64 { + return p.chunkInterval +} + +func (p *DBPartition) NextPart(t int64) (*DBPartition, error) { + return p.manager.TimeToPart(t) +} + +func (p *DBPartition) GetStartTime() int64 { + return p.startTime +} + +func (p *DBPartition) GetEndTime() int64 { + return p.startTime + p.partitionInterval - 1 +} + +// Return the path to this partition's TSDB table +func (p *DBPartition) GetTablePath() string { + return p.path +} + +// Return the name of this partition's attribute name +func (p *DBPartition) GetPartitionAttributeName() string { + return fmt.Sprintf("%v%v", partitionAttributePrefix, strconv.FormatInt(p.startTime, 10)) +} + +// Return a list of sharding keys matching the given item name +func (p *DBPartition) GetShardingKeys(name string) []string { + shardingKeysNum := 
p.manager.schemaConfig.TableSchemaInfo.ShardingBucketsCount + var res = make([]string, 0, shardingKeysNum) + for i := 0; i < shardingKeysNum; i++ { + // Trailing period ('.') for range-scan queries + res = append(res, fmt.Sprintf("%s_%x.", name, i)) + } + + return res +} + +// Return the full path to the specified metric item +func (p *DBPartition) GetMetricPath(name string, hash uint64, labelNames []string, isAggr bool) string { + agg := "" + if isAggr { + if len(labelNames) == 0 { + agg = "agg/" + } else { + var namelessLabelNames []string + for _, l := range labelNames { + if l != config.PrometheusMetricNameAttribute { + namelessLabelNames = append(namelessLabelNames, l) + } + } + agg = fmt.Sprintf("agg/%s/", strings.Join(namelessLabelNames, ",")) + } + } + return fmt.Sprintf("%s%s%s_%x.%016x", p.path, agg, name, int(hash%uint64(p.GetHashingBuckets())), hash) +} + +func (p *DBPartition) AggrType() aggregate.AggrType { + return p.defaultRollups +} + +func (p *DBPartition) AggrBuckets() int { + return p.rollupBuckets +} + +func (p *DBPartition) RollupTime() int64 { + return p.rollupTime +} + +// Return the aggregation bucket ID for the specified time +func (p *DBPartition) Time2Bucket(t int64) int { + if p.rollupTime == 0 { + return 0 + } + if t > p.GetEndTime() { + return p.rollupBuckets - 1 + } + if t < p.GetStartTime() { + return 0 + } + return int((t - p.startTime) / p.rollupTime) +} + +// Return the start time of an aggregation bucket by id +func (p *DBPartition) GetAggregationBucketStartTime(id int) int64 { + return p.startTime + int64(id)*p.rollupTime +} + +// Return the end time of an aggregation bucket by id +func (p *DBPartition) GetAggregationBucketEndTime(id int) int64 { + return p.startTime + int64(id+1)*p.rollupTime - 1 +} + +func (p *DBPartition) Times2BucketRange(start, end int64) []int { + var buckets []int + + if start > p.GetEndTime() || end < p.startTime { + return buckets + } + + startingAggrBucket := p.Time2Bucket(start) + endAggrBucket := p.Time2Bucket(end) + + for bucketID := startingAggrBucket; bucketID <= endAggrBucket; bucketID++ { + buckets = append(buckets, bucketID) + } + + return buckets +} + +// Return the nearest chunk start time for the specified time +func (p *DBPartition) GetChunkMint(t int64) int64 { + if t > p.GetEndTime() { + return p.GetEndTime() - p.chunkInterval + 1 + } + if t < p.GetStartTime() { + return p.startTime + } + return p.chunkInterval * (t / p.chunkInterval) +} + +// Check whether the specified time (t) is within the range of the chunk starting at the specified start time (mint) +func (p *DBPartition) InChunkRange(mint, t int64) bool { + return t >= mint && t < (mint+p.chunkInterval) +} + +// Check whether the specified time (t) is ahead of the range of the chunk starting at the specified start time (mint) +func (p *DBPartition) IsAheadOfChunk(mint, t int64) bool { + return t >= (mint + p.chunkInterval) +} + +// Return the ID of the chunk whose range includes the specified time +func (p *DBPartition) TimeToChunkID(tmilli int64) (int, error) { + if tmilli >= p.startTime && tmilli <= p.GetEndTime() { + return int((tmilli-p.startTime)/p.chunkInterval) + 1, nil + } + return -1, errors.Errorf("Time %d isn't within the range of this partition.", tmilli) +} + +// Check if a chunk (by attribute name) is in the given time range. 
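// A worked example of the chunk/attribute arithmetic used by the helpers in
// this file (illustrative; the one-hour chunk interval is a placeholder and
// `part` stands for any *DBPartition): with chunkInterval = 1h and a partition
// starting at T0, a sample at T0+2.5h falls in chunk ID (2.5h/1h)+1 = 3, whose
// column attribute is "_v3" and which covers [T0+2h, T0+3h).
//
//	id, _ := part.TimeToChunkID(t)       // e.g. 3
//	attr := part.ChunkID2Attr("v", id)   // "_v3"
//	ok := part.IsChunkInRangeByAttr(attr, mint, maxt)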
+func (p *DBPartition) IsChunkInRangeByAttr(attr string, mint, maxt int64) bool { + + // Discard '_v' prefix + chunkIDStr := attr[2:] + chunkID, err := strconv.ParseInt(chunkIDStr, 10, 64) + if err != nil { + return false + } + + chunkStartTime := p.startTime + (chunkID-1)*p.chunkInterval + chunkEndTime := chunkStartTime + p.chunkInterval - 1 + + return mint <= chunkStartTime && maxt >= chunkEndTime +} + +// Get a chunk's start time by it's attribute name +func (p *DBPartition) GetChunkStartTimeByAttr(attr string) (int64, error) { + + // Discard '_v' prefix + chunkIDStr := attr[2:] + chunkID, err := strconv.ParseInt(chunkIDStr, 10, 64) + if err != nil { + return 0, err + } + + chunkStartTime := p.startTime + (chunkID-1)*p.chunkInterval + + return chunkStartTime, nil +} + +// Check whether the specified time is within the range of this partition +func (p *DBPartition) InRange(t int64) bool { + if p.manager.cyclic { + return true + } + return t >= p.startTime && t <= p.GetEndTime() +} + +// Return the start time (mint) and end time (maxt) for this partition; +// maxt may be required for a cyclic partition +func (p *DBPartition) GetPartitionRange() (int64, int64) { + // Start p.days ago, rounded to next hour + return p.startTime, p.startTime + p.partitionInterval +} + +// Return the attribute name of the given chunk +func (p *DBPartition) ChunkID2Attr(col string, id int) string { + return fmt.Sprintf("_%s%d", col, id) +} + +// Return the attributes that need to be retrieved for the specified time range +func (p *DBPartition) Range2Attrs(col string, mint, maxt int64) ([]string, int64) { + list := p.Range2Cids(mint, maxt) + var strList []string + for _, id := range list { + strList = append(strList, p.ChunkID2Attr(col, id)) + } + + var firstAttrTime int64 + if mint < p.startTime { + firstAttrTime = p.startTime + } else { + firstAttrTime = p.startTime + ((mint-p.startTime)/p.chunkInterval)*p.chunkInterval + } + return strList, firstAttrTime +} + +// Return a list of all the chunk IDs that match the specified time range +func (p *DBPartition) Range2Cids(mint, maxt int64) []int { + var list []int + start, err := p.TimeToChunkID(mint) + if err != nil { + start = 1 + } + end, err := p.TimeToChunkID(maxt) + if err != nil { + end = int(p.partitionInterval / p.chunkInterval) + } + for i := start; i <= end; i++ { + list = append(list, i) + } + return list +} + +func (p *DBPartition) GetHashingBuckets() int { + return p.manager.schemaConfig.TableSchemaInfo.ShardingBucketsCount +} + +func (p *DBPartition) ToMap() map[string]interface{} { + attributes := make(map[string]interface{}, 5) + attributes["aggregates"] = aggregate.MaskToString(p.AggrType()) + attributes["rollupTime"] = p.rollupTime + attributes["chunkInterval"] = p.chunkInterval + attributes["partitionInterval"] = p.partitionInterval + attributes["retentionDays"] = p.retentionDays + return attributes +} + +// Convert a time in milliseconds to day and hour integers +func TimeToDHM(tmilli int64) (int, int) { + t := int(tmilli / 1000) + h := (t / 3600) % 24 + d := t / 3600 / 24 + return d, h +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr_test.go new file mode 100644 index 00000000..f661f65a --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr_test.go @@ -0,0 +1,176 @@ +// +build unit + +/* +Copyright 2018 Iguazio Systems Ltd. 
+ +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package partmgr + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/tsdb/schema" +) + +func TestCreateNewPartition(tst *testing.T) { + manager := getPartitionManager(tst) + interval := manager.currentPartitionInterval + startTime := interval + 1 + + // First partition + part, err := manager.TimeToPart(startTime + interval) + assert.Nil(tst, err, "Failed converting time to a partition.") + if err != nil { + tst.FailNow() + } + assert.Equal(tst, 1, len(manager.partitions)) + assert.Equal(tst, manager.headPartition, part) + + // New head + part, err = manager.TimeToPart(startTime + (interval * 3)) + assert.Nil(tst, err, "Failed converting time to a partition.") + if err != nil { + tst.FailNow() + } + assert.Equal(tst, 2, len(manager.partitions)) + assert.Equal(tst, manager.headPartition, part) + + // Add first + part, err = manager.TimeToPart(startTime) + assert.Nil(tst, err, "Failed converting time to a partition.") + if err != nil { + tst.FailNow() + } + assert.Equal(tst, 3, len(manager.partitions)) + assert.Equal(tst, manager.partitions[0], part) +} + +func getPartitionManager(tst *testing.T) *PartitionManager { + const dummyConfig = `path: "/test"` + v3ioConfig, err := config.GetOrLoadFromData([]byte(dummyConfig)) + if err != nil { + tst.Fatalf("Failed to obtain a TSDB configuration. Error: %v", err) + } + + schm, err := schema.NewSchema(v3ioConfig, "1/s", "1h", "*", "") + if err != nil { + tst.Fatalf("Failed to create a TSDB schema. Error: %v", err) + } + + manager, err := NewPartitionMngr(schm, nil, v3ioConfig) + if err != nil { + tst.Fatalf("Failed to create a partition manager. 
Error: %v", err) + } + + return manager +} + +func TestNewPartitionMngrBadInput(t *testing.T) { + schemaConfig := &config.Schema{ + Partitions: []*config.Partition{{}}, + PartitionSchemaInfo: config.PartitionSchema{ + AggregationGranularity: "boo", + }, + } + v3ioConfig, err := config.GetOrLoadFromStruct(&config.V3ioConfig{}) + assert.NoError(t, err) + _, err = NewPartitionMngr(schemaConfig, nil, v3ioConfig) + assert.Error(t, err) +} + +func TestPartsForRange(tst *testing.T) { + numPartitions := 5 + manager := getPartitionManager(tst) + interval := manager.currentPartitionInterval + for i := 1; i <= numPartitions; i++ { + _, err := manager.TimeToPart(interval * int64(i)) + assert.Nil(tst, err, "Failed converting time to a partition.") + if err != nil { + tst.FailNow() + } + } + assert.Equal(tst, numPartitions, len(manager.partitions)) + // Get all partitions + assert.Equal(tst, manager.partitions, manager.PartsForRange(0, interval*int64(numPartitions+1), true)) + // Get no partitions + assert.Equal(tst, 0, len(manager.PartsForRange(0, interval-1, true)), true) + // Get the first 2 partitions + parts := manager.PartsForRange(0, interval*2+1, true) + assert.Equal(tst, 2, len(parts)) + assert.Equal(tst, manager.partitions[0], parts[0]) + assert.Equal(tst, manager.partitions[1], parts[1]) + // Get the middle 3 partitions + parts = manager.PartsForRange(interval*2, interval*4+1, true) + assert.Equal(tst, 3, len(parts)) + assert.Equal(tst, manager.partitions[1], parts[0]) + assert.Equal(tst, manager.partitions[2], parts[1]) + assert.Equal(tst, manager.partitions[3], parts[2]) + // Get the middle partition by inclusive=false + parts = manager.PartsForRange(interval*2+1, interval*4+1, false) + assert.Equal(tst, 1, len(parts)) + assert.Equal(tst, manager.partitions[2], parts[0]) + // Get the middle partition by inclusive=false + parts = manager.PartsForRange(interval*2-1, interval*4+1, false) + assert.Equal(tst, 2, len(parts)) + assert.Equal(tst, manager.partitions[1], parts[0]) + assert.Equal(tst, manager.partitions[2], parts[1]) +} + +func TestTime2Bucket(tst *testing.T) { + manager := getPartitionManager(tst) + part, _ := manager.TimeToPart(1000000) + assert.Equal(tst, 0, part.Time2Bucket(100)) + assert.Equal(tst, part.rollupBuckets-1, part.Time2Bucket(part.startTime+part.partitionInterval+1)) + assert.Equal(tst, part.rollupBuckets/2, part.Time2Bucket((part.startTime+part.partitionInterval)/2)) +} + +func TestGetChunkMint(tst *testing.T) { + manager := getPartitionManager(tst) + part, err := manager.TimeToPart(manager.currentPartitionInterval) + assert.Nil(tst, err, "Failed converting time to a partition.") + if err != nil { + tst.FailNow() + } + assert.Equal(tst, part.startTime, part.GetChunkMint(0)) + assert.Equal(tst, part.startTime, part.GetChunkMint(part.startTime+1)) + assert.Equal(tst, part.startTime+part.chunkInterval, part.GetChunkMint(part.startTime+part.chunkInterval+100)) + assert.Equal(tst, part.GetEndTime()-part.chunkInterval+1, part.GetChunkMint(part.GetEndTime()+100)) +} + +func TestInRange(tst *testing.T) { + manager := getPartitionManager(tst) + part, _ := manager.TimeToPart(manager.currentPartitionInterval) + assert.Equal(tst, false, part.InRange(part.GetStartTime()-100)) + assert.Equal(tst, false, part.InRange(part.GetEndTime()+100)) + assert.Equal(tst, true, part.InRange(part.GetStartTime()+part.partitionInterval/2)) +} + +func TestRange2Cids(tst *testing.T) { + manager := getPartitionManager(tst) + part, _ := manager.TimeToPart(manager.currentPartitionInterval) + numChunks 
:= int(part.partitionInterval / part.chunkInterval) + var cids []int + for i := 1; i <= numChunks; i++ { + cids = append(cids, i) + } + assert.Equal(tst, cids, part.Range2Cids(0, part.GetEndTime()+100)) + assert.Equal(tst, []int{3, 4, 5}, part.Range2Cids(part.startTime+2*part.chunkInterval, part.startTime+5*part.chunkInterval-1)) +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator.go new file mode 100644 index 00000000..c79e7c61 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator.go @@ -0,0 +1,283 @@ +package pquerier + +import ( + "strings" + + "github.com/nuclio/logger" + "github.com/pkg/errors" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +// Chunk-list series iterator +type RawChunkIterator struct { + mint, maxt, aggregationWindow int64 + + chunks []chunkenc.Chunk + encoding chunkenc.Encoding + + chunkIndex int + chunksMax []int64 + iter chunkenc.Iterator + log logger.Logger + + prevT int64 + prevV float64 +} + +func newRawChunkIterator(queryResult *qryResults, log logger.Logger) utils.SeriesIterator { + maxt := queryResult.query.maxt + maxTime := queryResult.fields[config.MaxTimeAttrName] + if maxTime != nil && int64(maxTime.(int)) < maxt { + maxt = int64(maxTime.(int)) + } + + var aggregationWindow int64 + if queryResult.query.aggregationParams != nil { + aggregationWindow = queryResult.query.aggregationParams.GetAggregationWindow() + } + newIterator := RawChunkIterator{ + mint: queryResult.query.mint, + maxt: maxt, + aggregationWindow: aggregationWindow, + log: log.GetChild("rawChunkIterator"), + encoding: queryResult.encoding} + + newIterator.AddChunks(queryResult) + + if len(newIterator.chunks) == 0 { + // If there's no data, create a null iterator + return &utils.NullSeriesIterator{} + } + newIterator.iter = newIterator.chunks[0].Iterator() + return &newIterator +} + +// Advance the iterator to the specified chunk and time +func (it *RawChunkIterator) Seek(t int64) bool { + + // Seek time is after the item's end time (maxt) + if t > it.maxt { + return false + } + + // Seek to the first valid value after t + if t < it.mint-it.aggregationWindow { + t = it.mint - it.aggregationWindow + } + + // Check the first element + t0, _ := it.iter.At() + if t0 > it.maxt { + return false + } + if t <= t0 { + return true + } + + for { + it.updatePrevPoint() + if it.iter.Next() { + t0, _ := it.iter.At() + if t0 > it.maxt { + return false + } + if t > it.chunksMax[it.chunkIndex] { + // This chunk is too far behind; move to the next chunk or + // Return false if it's the last chunk + if it.chunkIndex == len(it.chunks)-1 { + return false + } + it.chunkIndex++ + it.iter = it.chunks[it.chunkIndex].Iterator() + } else if t <= t0 { + // The cursor (t0) is either on t or just passed t + return true + } + } else { + // End of chunk; move to the next chunk or return if last + if it.chunkIndex == len(it.chunks)-1 { + return false + } + + // Free up memory of old chunk + it.chunks[it.chunkIndex] = nil + + it.chunkIndex++ + it.iter = it.chunks[it.chunkIndex].Iterator() + } + } +} + +func (it *RawChunkIterator) updatePrevPoint() { + t, v := it.At() + if !(t == 0 && v == 0) { + it.prevT, it.prevV = t, v + } +} + +// Move to the next iterator item +func (it *RawChunkIterator) Next() bool { + it.updatePrevPoint() + if it.iter.Next() { + t, _ := it.iter.At() + if t < 
it.mint-it.aggregationWindow { + if !it.Seek(it.mint) { + return false + } + t, _ = it.At() + + return t <= it.maxt + } + if t <= it.maxt { + return true + } + return false + } + + if err := it.iter.Err(); err != nil { + return false + } + if it.chunkIndex == len(it.chunks)-1 { + return false + } + + // Free up memory of old chunk + it.chunks[it.chunkIndex] = nil + + it.chunkIndex++ + it.iter = it.chunks[it.chunkIndex].Iterator() + return it.Next() +} + +// Read the time and value at the current location +func (it *RawChunkIterator) At() (t int64, v float64) { return it.iter.At() } + +func (it *RawChunkIterator) AtString() (t int64, v string) { return it.iter.AtString() } + +func (it *RawChunkIterator) Err() error { return it.iter.Err() } + +func (it *RawChunkIterator) Encoding() chunkenc.Encoding { return it.encoding } + +func (it *RawChunkIterator) AddChunks(item *qryResults) { + var chunks []chunkenc.Chunk + var chunksMax []int64 + if item.query.maxt > it.maxt { + it.maxt = item.query.maxt + } + if item.query.mint < it.mint { + it.mint = item.query.mint + } + _, firstChunkTime := item.query.partition.Range2Attrs("v", it.mint, it.maxt) + // Create and initialize a chunk encoder per chunk blob + i := 0 + for _, attr := range item.query.attrs { + + // In case we get both raw chunks and server aggregates, only go over the chunks. + if !strings.Contains(attr, config.AggregateAttrPrefix) { + values := item.fields[attr] + if values != nil { + bytes := values.([]byte) + + chunk, err := chunkenc.FromData(it.log, it.encoding, bytes, 0) + if err != nil { + it.log.ErrorWith("Error reading chunk buffer", "columns", item.query.attrs, "err", err) + } else { + chunks = append(chunks, chunk) + // Calculate the end time for the current chunk + chunksMax = append(chunksMax, + firstChunkTime+int64(i+1)*item.query.partition.TimePerChunk()-1) + } + } + i++ + } + } + + // Add new chunks sorted + if len(chunksMax) != 0 { + if len(it.chunksMax) == 0 || it.chunksMax[len(it.chunksMax)-1] < chunksMax[0] { + it.chunks = append(it.chunks, chunks...) + it.chunksMax = append(it.chunksMax, chunksMax...) + } else { + for i := 0; i < len(it.chunksMax); i++ { + if it.chunksMax[i] > chunksMax[0] { + endChunks := append(chunks, it.chunks[i:]...) + it.chunks = append(it.chunks[:i], endChunks...) + + endMaxChunks := append(chunksMax, it.chunksMax[i:]...) + it.chunksMax = append(it.chunksMax[:i], endMaxChunks...) 
+ + // If we are inserting a new chunk to the beginning set the current iterator to the new first chunk + if i == 0 { + it.iter = it.chunks[0].Iterator() + } + break + } + } + } + } +} + +func (it *RawChunkIterator) PeakBack() (t int64, v float64) { return it.prevT, it.prevV } + +func NewRawSeries(results *qryResults, logger logger.Logger) (utils.Series, error) { + newSeries := V3ioRawSeries{fields: results.fields, logger: logger, encoding: results.encoding} + err := newSeries.initLabels() + if err != nil { + return nil, err + } + newSeries.iter = newRawChunkIterator(results, logger) + return &newSeries, nil +} + +type V3ioRawSeries struct { + fields map[string]interface{} + lset utils.Labels + iter utils.SeriesIterator + logger logger.Logger + hash uint64 + encoding chunkenc.Encoding +} + +func (s *V3ioRawSeries) Labels() utils.Labels { return s.lset } + +// Get the unique series key for sorting +func (s *V3ioRawSeries) GetKey() uint64 { + if s.hash == 0 { + s.hash = s.lset.Hash() + } + return s.hash +} + +func (s *V3ioRawSeries) Iterator() utils.SeriesIterator { return s.iter } + +func (s *V3ioRawSeries) AddChunks(results *qryResults) { + switch iter := s.iter.(type) { + case *RawChunkIterator: + iter.AddChunks(results) + case *utils.NullSeriesIterator: + s.iter = newRawChunkIterator(results, s.logger) + } +} + +// Initialize the label set from _lset and _name attributes +func (s *V3ioRawSeries) initLabels() error { + name, ok := s.fields[config.MetricNameAttrName].(string) + if !ok { + return errors.Errorf("error in initLabels; bad metric name: %v", s.fields[config.MetricNameAttrName].(string)) + } + lsetAttr, ok := s.fields[config.LabelSetAttrName].(string) + if !ok { + return errors.Errorf("error in initLabels; bad labels set: %v", s.fields[config.LabelSetAttrName].(string)) + } + + lset, err := utils.LabelsFromStringWithName(name, lsetAttr) + + if err != nil { + return errors.Errorf("error in initLabels; failed to parse labels set string: %v. 
err: %v", s.fields[config.LabelSetAttrName].(string), err) + } + + s.lset = lset + return nil +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator_test.go new file mode 100644 index 00000000..28030d2a --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator_test.go @@ -0,0 +1,118 @@ +// +build integration + +package pquerier_test + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +const baseTestTime = int64(1547510400000) // 15/01/2019 00:00:00 + +type testRawChunkIterSuite struct { + suite.Suite + v3ioConfig *config.V3ioConfig + suiteTimestamp int64 +} + +func (suite *testRawChunkIterSuite) SetupSuite() { + v3ioConfig, err := tsdbtest.LoadV3ioConfig() + suite.Require().NoError(err) + + suite.v3ioConfig = v3ioConfig + suite.suiteTimestamp = time.Now().Unix() +} + +func (suite *testRawChunkIterSuite) SetupTest() { + suite.v3ioConfig.TablePath = fmt.Sprintf("%s-%v", suite.T().Name(), suite.suiteTimestamp) + tsdbtest.CreateTestTSDB(suite.T(), suite.v3ioConfig) +} + +func (suite *testRawChunkIterSuite) TearDownTest() { + suite.v3ioConfig.TablePath = fmt.Sprintf("%s-%v", suite.T().Name(), suite.suiteTimestamp) + if !suite.T().Failed() { + tsdbtest.DeleteTSDB(suite.T(), suite.v3ioConfig) + } +} + +func (suite *testRawChunkIterSuite) TestRawChunkIteratorWithZeroValue() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err) + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + ingestData := []tsdbtest.DataPoint{{baseTestTime, 10}, + {baseTestTime + tsdbtest.MinuteInMillis, 0}, + {baseTestTime + 2*tsdbtest.MinuteInMillis, 30}, + {baseTestTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err) + + params, _, _ := pquerier.ParseQuery("select cpu") + params.From = baseTestTime + params.To = baseTestTime + int64(numberOfEvents*eventsInterval) + + set, err := querierV2.Select(params) + suite.Require().NoError(err) + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator().(*pquerier.RawChunkIterator) + + var index int + for iter.Next() { + t, v := iter.At() + prevT, prevV := iter.PeakBack() + + suite.Require().Equal(ingestData[index].Time, t, "current time does not match") + + switch val := ingestData[index].Value.(type) { + case float64: + suite.Require().Equal(val, v, "current value does not match") + case int: + suite.Require().Equal(float64(val), v, "current value does not match") + default: + suite.Require().Equal(val, v, "current value does not match") + } + + if index > 0 { + suite.Require().Equal(ingestData[index-1].Time, prevT, "current time does not match") + switch val := ingestData[index-1].Value.(type) { + case float64: + suite.Require().Equal(val, prevV, "current value does not match") + case int: + suite.Require().Equal(float64(val), prevV, 
"current value does not match") + default: + suite.Require().Equal(val, prevV, "current value does not match") + } + } + index++ + } + } + + suite.Require().Equal(1, seriesCount, "series count didn't match expected") +} + +func TestRawChunkIterSuite(t *testing.T) { + suite.Run(t, new(testRawChunkIterSuite)) +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/collector.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/collector.go new file mode 100644 index 00000000..4be0430b --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/collector.go @@ -0,0 +1,373 @@ +package pquerier + +import ( + "encoding/binary" + "math" + + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +/* main query flow logic + +fire GetItems to all partitions and tables + +iterate over results from first to last partition + hash lookup (over labels only w/o name) to find dataFrame + if not found create new dataFrame + based on hash dispatch work to one of the parallel collectors + collectors convert raw/array data to series and aggregate or group + +once collectors are done (wg.done) return SeriesSet (prom compatible) or FrameSet (iguazio column interface) + final aggregators (Avg, Stddav/var, ..) are formed from raw aggr in flight via iterators + - Series: have a single name and optional aggregator per time series, values limited to Float64 + - Frames: have index/time column(s) and multiple named/typed value columns (one per metric name * function) + +** optionally can return SeriesSet (from dataFrames) to Prom immediately after we completed GetItems iterators + and block (wg.done) the first time Prom tries to access the SeriesIter data (can lower latency) + + if result set needs to be ordered we can also sort the dataFrames based on Labels data (e.g. order-by username) + in parallel to having all the time series data processed by the collectors + +*/ + +/* collector logic: + +- get qryResults from chan + +- if raw query + if first partition + create series + else + append chunks to existing series + +- if vector query (results are bucketed over time or grouped by) + if first partition + create & init array per function (and per name) based on query metadata/results + + init child raw-chunk or attribute iterators + iterate over data and fill bucketed arrays + if missing time or data use interpolation + +- if got fin message and processed last item + use sync waitgroup to signal the main that the go routines are done + will allow main flow to continue and serve the results, no locks are required + +*/ + +// Main collector which processes query results from a channel and then dispatches them according to query type. 
+// Query types: raw data, server-side aggregates, client-side aggregates +func mainCollector(ctx *selectQueryContext, responseChannel chan *qryResults) { + defer ctx.wg.Done() + + lastTimePerMetric := make(map[uint64]int64, len(ctx.columnsSpecByMetric)) + lastValuePerMetric := make(map[uint64]float64, len(ctx.columnsSpecByMetric)) + + for { + select { + case _ = <-ctx.stopChan: + return + case res, ok := <-responseChannel: + if !ok { + return + } + if res.IsRawQuery() { + err := rawCollector(ctx, res) + if err != nil { + ctx.errorChannel <- err + return + } + } else { + err := res.frame.addMetricIfNotExist(res.name, ctx.getResultBucketsSize(), res.IsServerAggregates()) + if err != nil { + ctx.logger.Error("problem adding new metric '%v', lset: %v, err:%v", res.name, res.frame.lset, err) + ctx.errorChannel <- err + return + } + lsetAttr, _ := res.fields[config.LabelSetAttrName].(string) + lset, _ := utils.LabelsFromString(lsetAttr) + lset = append(lset, utils.Label{Name: config.MetricNameAttrName, Value: res.name}) + currentResultHash := lset.Hash() + + // Aggregating cross series aggregates, only supported over raw data. + if ctx.isCrossSeriesAggregate { + lastTimePerMetric[currentResultHash], lastValuePerMetric[currentResultHash], _ = aggregateClientAggregatesCrossSeries(ctx, res, lastTimePerMetric[currentResultHash], lastValuePerMetric[currentResultHash]) + } else { + // Aggregating over time aggregates + if res.IsServerAggregates() { + aggregateServerAggregates(ctx, res) + } else if res.IsClientAggregates() { + aggregateClientAggregates(ctx, res) + } + } + + // It is possible to query an aggregate and down sample raw chunks in the same df. + if res.IsDownsample() { + lastTimePerMetric[currentResultHash], lastValuePerMetric[currentResultHash], err = downsampleRawData(ctx, res, lastTimePerMetric[currentResultHash], lastValuePerMetric[currentResultHash]) + if err != nil { + ctx.logger.Error("problem downsampling '%v', lset: %v, err:%v", res.name, res.frame.lset, err) + ctx.errorChannel <- err + return + } + } + } + } + } +} + +func rawCollector(ctx *selectQueryContext, res *qryResults) error { + ctx.logger.Debug("using Raw Collector for metric %v", res.name) + + if res.frame.isWildcardSelect { + columnIndex, ok := res.frame.columnByName[res.name] + if ok { + res.frame.rawColumns[columnIndex].(*V3ioRawSeries).AddChunks(res) + } else { + series, err := NewRawSeries(res, ctx.logger.GetChild("v3ioRawSeries")) + if err != nil { + return err + } + res.frame.rawColumns = append(res.frame.rawColumns, series) + res.frame.columnByName[res.name] = len(res.frame.rawColumns) - 1 + } + } else { + columnIndex := res.frame.columnByName[res.name] + rawColumn := res.frame.rawColumns[columnIndex] + if rawColumn != nil { + res.frame.rawColumns[columnIndex].(*V3ioRawSeries).AddChunks(res) + } else { + series, err := NewRawSeries(res, ctx.logger.GetChild("v3ioRawSeries")) + if err != nil { + return err + } + res.frame.rawColumns[columnIndex] = series + } + } + return nil +} + +func aggregateClientAggregates(ctx *selectQueryContext, res *qryResults) { + ctx.logger.Debug("using Client Aggregates Collector for metric %v", res.name) + it := newRawChunkIterator(res, ctx.logger) + for it.Next() { + t, v := it.At() + + if res.query.aggregationParams.HasAggregationWindow() { + windowAggregation(ctx, res, t, v) + } else { + intervalAggregation(ctx, res, t, v) + } + } +} + +func aggregateServerAggregates(ctx *selectQueryContext, res *qryResults) { + ctx.logger.Debug("using Server Aggregates Collector for metric %v", 
res.name) + + partitionStartTime := res.query.partition.GetStartTime() + rollupInterval := res.query.aggregationParams.GetRollupTime() + for _, col := range res.frame.columns { + if col.GetColumnSpec().metric == res.name && + aggregate.HasAggregates(col.GetColumnSpec().function) && + col.GetColumnSpec().isConcrete() { + + array, ok := res.fields[aggregate.ToAttrName(col.GetColumnSpec().function)] + if !ok { + ctx.logger.Warn("requested function %v was not found in response", col.GetColumnSpec().function) + } else { + // go over the byte array and convert each uint as we go to save memory allocation + bytes := array.([]byte) + + for i := 16; i+8 <= len(bytes); i += 8 { + val := binary.LittleEndian.Uint64(bytes[i : i+8]) + currentValueIndex := (i - 16) / 8 + + // Calculate server side aggregate bucket by its median time + currentValueTime := partitionStartTime + int64(currentValueIndex)*rollupInterval + rollupInterval/2 + currentCell := (currentValueTime - ctx.queryParams.From) / res.query.aggregationParams.Interval + + var floatVal float64 + if aggregate.IsCountAggregate(col.GetColumnSpec().function) { + floatVal = float64(val) + } else { + floatVal = math.Float64frombits(val) + } + + bottomMargin := res.query.aggregationParams.Interval + if res.query.aggregationParams.HasAggregationWindow() { + bottomMargin = res.query.aggregationParams.GetAggregationWindow() + } + if currentValueTime >= ctx.queryParams.From-bottomMargin && currentValueTime <= ctx.queryParams.To+res.query.aggregationParams.Interval { + if !res.query.aggregationParams.HasAggregationWindow() { + _ = res.frame.setDataAt(col.Name(), int(currentCell), floatVal) + } else { + windowAggregationWithServerAggregates(ctx, res, col, currentValueTime, floatVal) + } + } + } + } + } + } +} + +func downsampleRawData(ctx *selectQueryContext, res *qryResults, + previousPartitionLastTime int64, previousPartitionLastValue float64) (int64, float64, error) { + ctx.logger.Debug("using Downsample Collector for metric %v", res.name) + + it, ok := newRawChunkIterator(res, ctx.logger).(*RawChunkIterator) + if !ok { + return previousPartitionLastTime, previousPartitionLastValue, nil + } + col, err := res.frame.Column(res.name) + if err != nil { + return previousPartitionLastTime, previousPartitionLastValue, err + } + for currCell := 0; currCell < col.Len(); currCell++ { + currCellTime := int64(currCell)*ctx.queryParams.Step + ctx.queryParams.From + prev, err := col.getBuilder().At(currCell) + + // Only update a cell if it hasn't been set yet + if prev == nil || err != nil { + if it.Seek(currCellTime) { + t, v := it.At() + if t == currCellTime { + _ = res.frame.setDataAt(col.Name(), currCell, v) + } else { + prevT, prevV := it.PeakBack() + + // In case it's the first point in the partition use the last point of the previous partition for the interpolation + if prevT == 0 { + prevT = previousPartitionLastTime + prevV = previousPartitionLastValue + } + interpolatedT, interpolatedV := col.GetInterpolationFunction()(prevT, t, currCellTime, prevV, v) + + // Check if the interpolation was successful in terms of exceeding tolerance + if !(interpolatedT == 0 && interpolatedV == 0) { + _ = res.frame.setDataAt(col.Name(), currCell, interpolatedV) + } + } + } + } + } + + lastT, lastV := it.At() + return lastT, lastV, nil +} + +func aggregateClientAggregatesCrossSeries(ctx *selectQueryContext, res *qryResults, previousPartitionLastTime int64, previousPartitionLastValue float64) (int64, float64, error) { + ctx.logger.Debug("using Client Aggregates Collector for 
metric %v", res.name) + it, ok := newRawChunkIterator(res, ctx.logger).(*RawChunkIterator) + if !ok { + return previousPartitionLastTime, previousPartitionLastValue, nil + } + + var previousPartitionEndBucket int + if previousPartitionLastTime != 0 { + previousPartitionEndBucket = int((previousPartitionLastTime-ctx.queryParams.From)/ctx.queryParams.Step) + 1 + } + maxBucketForPartition := int((res.query.partition.GetEndTime() - ctx.queryParams.From) / ctx.queryParams.Step) + if maxBucketForPartition > ctx.getResultBucketsSize() { + maxBucketForPartition = ctx.getResultBucketsSize() + } + + for currBucket := previousPartitionEndBucket; currBucket < maxBucketForPartition; currBucket++ { + currBucketTime := int64(currBucket)*ctx.queryParams.Step + ctx.queryParams.From + + if it.Seek(currBucketTime) { + t, v := it.At() + if t == currBucketTime { + for _, col := range res.frame.columns { + if col.GetColumnSpec().metric == res.name { + _ = res.frame.setDataAt(col.Name(), currBucket, v) + } + } + } else { + prevT, prevV := it.PeakBack() + + // In case it's the first point in the partition use the last point of the previous partition for the interpolation + if prevT == 0 { + prevT = previousPartitionLastTime + prevV = previousPartitionLastValue + } + + for _, col := range res.frame.columns { + if col.GetColumnSpec().metric == res.name { + interpolatedT, interpolatedV := col.GetInterpolationFunction()(prevT, t, currBucketTime, prevV, v) + if !(interpolatedT == 0 && interpolatedV == 0) { + _ = res.frame.setDataAt(col.Name(), currBucket, interpolatedV) + } + } + } + } + } else { + break + } + } + + lastT, lastV := it.At() + return lastT, lastV, nil +} + +func intervalAggregation(ctx *selectQueryContext, res *qryResults, t int64, v float64) { + currentCell := getRelativeCell(t, ctx.queryParams.From, res.query.aggregationParams.Interval, false) + aggregateAllColumns(res, currentCell, v) +} + +func windowAggregation(ctx *selectQueryContext, res *qryResults, t int64, v float64) { + currentCell := getRelativeCell(t, ctx.queryParams.From, res.query.aggregationParams.Interval, true) + aggregationWindow := res.query.aggregationParams.GetAggregationWindow() + + if aggregationWindow > res.query.aggregationParams.Interval { + currentCellTime := ctx.queryParams.From + currentCell*res.query.aggregationParams.Interval + maximumAffectedTime := t + aggregationWindow + numAffectedCells := (maximumAffectedTime-currentCellTime)/res.query.aggregationParams.Interval + 1 // +1 to include the current cell + + for i := int64(0); i < numAffectedCells; i++ { + aggregateAllColumns(res, currentCell+i, v) + } + } else if aggregationWindow < res.query.aggregationParams.Interval { + if t+aggregationWindow >= ctx.queryParams.From+currentCell*res.query.aggregationParams.Interval { + aggregateAllColumns(res, currentCell, v) + } + } else { + aggregateAllColumns(res, currentCell, v) + } +} + +func windowAggregationWithServerAggregates(ctx *selectQueryContext, res *qryResults, column Column, t int64, v float64) { + currentCell := getRelativeCell(t, ctx.queryParams.From, res.query.aggregationParams.Interval, true) + + aggregationWindow := res.query.aggregationParams.GetAggregationWindow() + if aggregationWindow > res.query.aggregationParams.Interval { + currentCellTime := ctx.queryParams.From + currentCell*res.query.aggregationParams.Interval + maxAffectedTime := t + aggregationWindow + numAffectedCells := (maxAffectedTime-currentCellTime)/res.query.aggregationParams.Interval + 1 // +1 to include the current cell + + for i := int64(0); i 
< numAffectedCells; i++ { + _ = res.frame.setDataAt(column.Name(), int(currentCell+i), v) + } + } else { + _ = res.frame.setDataAt(column.Name(), int(currentCell), v) + } +} + +func getRelativeCell(time, beginning, interval int64, roundUp bool) int64 { + cell := (time - beginning) / interval + + if roundUp && (time-beginning)%interval > 0 { + cell++ + } + + return cell +} + +// Set data to all aggregated columns for the given metric +func aggregateAllColumns(res *qryResults, cell int64, value float64) { + for _, col := range res.frame.columns { + colSpec := col.GetColumnSpec() + if colSpec.metric == res.name && colSpec.function != 0 { + _ = res.frame.setDataAt(col.Name(), int(cell), value) + } + } +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/frames.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/frames.go new file mode 100644 index 00000000..a903122f --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/frames.go @@ -0,0 +1,864 @@ +package pquerier + +import ( + "fmt" + "math" + "time" + + "github.com/pkg/errors" + "github.com/v3io/frames" + "github.com/v3io/frames/pb" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type frameIterator struct { + ctx *selectQueryContext + setIndex int + seriesIndex int + columnNum int + err error +} + +// create new frame set iterator, frame iter has a SeriesSet interface (for Prometheus) plus columnar interfaces +func newFrameIterator(ctx *selectQueryContext) (*frameIterator, error) { + if !ctx.isRawQuery() { + for _, f := range ctx.frameList { + if err := f.finishAllColumns(); err != nil { + return nil, errors.Wrapf(err, "failed to create columns for DF=%v", f.Labels()) + } + } + } + + return &frameIterator{ctx: ctx, columnNum: ctx.totalColumns, setIndex: 0, seriesIndex: -1}, nil +} + +// advance to the next data frame +func (fi *frameIterator) NextFrame() bool { + fi.setIndex++ + return fi.setIndex-1 < len(fi.ctx.frameList) +} + +// get current data frame +func (fi *frameIterator) GetFrame() (frames.Frame, error) { + return fi.ctx.frameList[fi.setIndex-1].GetFrame() +} + +// advance to the next time series (for Prometheus mode) +func (fi *frameIterator) Next() bool { + + var numberOfColumnsInCurrentSeries int + if len(fi.ctx.frameList) > 0 { + numberOfColumnsInCurrentSeries = len(fi.ctx.frameList[fi.setIndex].columnByName) + } + + if fi.seriesIndex < numberOfColumnsInCurrentSeries-1 { + // can advance series within a frame + fi.seriesIndex++ + } else if fi.setIndex+1 >= len(fi.ctx.frameList) { + // already in the last column in the last frame + return false + } else { + // advance to next frame + fi.setIndex++ + fi.seriesIndex = 0 + } + + if fi.isCurrentSeriesHidden() { + return fi.Next() + } + + series := fi.ctx.frameList[fi.setIndex] + // If raw series is nil + if series.isRawSeries && series.rawColumns[fi.seriesIndex] == nil { + return fi.Next() + } + + return true +} + +// get current time series (for Prometheus mode) +func (fi *frameIterator) At() utils.Series { + s, err := fi.ctx.frameList[fi.setIndex].TimeSeries(fi.seriesIndex) + if err != nil { + fi.err = err + } + return s +} + +func (fi *frameIterator) isCurrentSeriesHidden() bool { + if fi.ctx.isRawQuery() { + return false + } + col, err := fi.ctx.frameList[fi.setIndex].ColumnAt(fi.seriesIndex) + if err != nil { + fi.err = err + } + + return col.GetColumnSpec().isHidden +} + +func (fi 
*frameIterator) Err() error { + return fi.err +} + +// data frame, holds multiple value columns and an index (time) column +func newDataFrame(columnsSpec []columnMeta, indexColumn Column, lset utils.Labels, hash uint64, isRawQuery bool, columnSize int, useServerAggregates, showAggregateLabel bool) (*dataFrame, error) { + df := &dataFrame{lset: lset, hash: hash, isRawSeries: isRawQuery, showAggregateLabel: showAggregateLabel} + // is raw query + if isRawQuery { + df.columnByName = make(map[string]int, len(columnsSpec)) + + // Create the columns in the DF based on the requested columns order. + for i, col := range columnsSpec { + if col.metric == "" { + df.isWildcardSelect = true + break + } + df.columnByName[col.getColumnName()] = i + } + + // If no specific order was requested (like when querying for all metrics), + // discard order and reset columns for future initialization. + if df.isWildcardSelect { + df.columnByName = make(map[string]int, len(columnsSpec)) + df.rawColumns = []utils.Series{} + } else { + // Initialize `rawcolumns` to the requested size. + df.rawColumns = make([]utils.Series, len(columnsSpec)) + } + } else { + numOfColumns := len(columnsSpec) + df.index = indexColumn + df.columnByName = make(map[string]int, numOfColumns) + df.columns = make([]Column, 0, numOfColumns) + df.metricToCountColumn = map[string]Column{} + df.metrics = map[string]struct{}{} + df.nonEmptyRowsIndicators = make([]bool, columnSize) + + i := 0 + for _, col := range columnsSpec { + // In case user wanted all metrics, save the template for every metric. + // Once we know what metrics we have we will create Columns out of the column Templates + if col.isWildcard() { + df.columnsTemplates = append(df.columnsTemplates, col) + } else { + column, err := createColumn(col, columnSize, useServerAggregates) + if err != nil { + return nil, err + } + if aggregate.IsCountAggregate(col.function) { + df.metricToCountColumn[col.metric] = column + } + df.columns = append(df.columns, column) + df.columnByName[col.getColumnName()] = i + i++ + } + } + for _, col := range df.columns { + if !col.GetColumnSpec().isConcrete() { + fillDependantColumns(col, df) + } + } + } + + return df, nil +} + +func createColumn(col columnMeta, columnSize int, useServerAggregates bool) (Column, error) { + var column Column + if col.function != 0 { + if col.isConcrete() { + function, err := getAggreagteFunction(col.function, useServerAggregates) + if err != nil { + return nil, err + } + column = NewConcreteColumn(col.getColumnName(), col, columnSize, function) + } else { + function, err := getVirtualColumnFunction(col.function) + if err != nil { + return nil, err + } + + column = NewVirtualColumn(col.getColumnName(), col, columnSize, function) + } + } else { + column = newDataColumn(col.getColumnName(), col, columnSize, frames.FloatType) + } + + return column, nil +} + +func getAggreagteFunction(aggrType aggregate.AggrType, useServerAggregates bool) (func(interface{}, interface{}) interface{}, error) { + if useServerAggregates { + return aggregate.GetServerAggregationsFunction(aggrType) + } + return aggregate.GetClientAggregationsFunction(aggrType) +} + +func fillDependantColumns(wantedColumn Column, df *dataFrame) { + wantedAggregations := aggregate.GetDependantAggregates(wantedColumn.GetColumnSpec().function) + var columns []Column + + // Order of the dependent columns should be the same as `wantedAggregations`. 
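+	// Illustrative note (assumed example, not derived from an actual query plan): for a
+	// requested `avg(cpu)` virtual column, GetDependantAggregates would typically yield
+	// `count` and `sum`, so this loop collects the concrete `count(cpu)` and `sum(cpu)`
+	// columns in that order; the virtual column's function later combines them (sum
+	// divided by count) per cell to materialize the average.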
+ for _, agg := range wantedAggregations { + for _, col := range df.columns { + if col.GetColumnSpec().metric == wantedColumn.GetColumnSpec().metric && + agg == col.GetColumnSpec().function { + columns = append(columns, col) + } + } + } + wantedColumn.(*virtualColumn).dependantColumns = columns +} + +func getVirtualColumnFunction(aggrType aggregate.AggrType) (func([]Column, int) (interface{}, error), error) { + function, err := aggregate.GetServerVirtualAggregationFunction(aggrType) + if err != nil { + return nil, err + } + return func(columns []Column, index int) (interface{}, error) { + data := make([]float64, len(columns)) + for i, c := range columns { + v, err := c.FloatAt(index) + if err != nil { + return nil, err + } + + data[i] = v + } + return function(data), nil + }, nil +} + +type dataFrame struct { + lset utils.Labels + hash uint64 + showAggregateLabel bool + + isRawSeries bool + isRawColumnsGenerated bool + rawColumns []utils.Series + + columnsTemplates []columnMeta + columns []Column + index Column + columnByName map[string]int // name -> index in columns + nonEmptyRowsIndicators []bool + nullValuesMaps []*pb.NullValuesMap + + metrics map[string]struct{} + metricToCountColumn map[string]Column + + isWildcardSelect bool +} + +func (d *dataFrame) addMetricIfNotExist(metricName string, columnSize int, useServerAggregates bool) error { + if _, ok := d.metrics[metricName]; !ok { + return d.addMetricFromTemplate(metricName, columnSize, useServerAggregates) + } + return nil +} + +func (d *dataFrame) addMetricFromTemplate(metricName string, columnSize int, useServerAggregates bool) error { + var newColumns []Column + for _, col := range d.columnsTemplates { + col.metric = metricName + newCol, err := createColumn(col, columnSize, useServerAggregates) + if err != nil { + return err + } + + // Make sure there is only 1 count column per metric. + // Count is the only column we automatically add so in some cases we get multiple count columns in the templates. + _, ok := d.metricToCountColumn[metricName] + if !aggregate.IsCountAggregate(col.function) || !ok { + newColumns = append(newColumns, newCol) + } + if aggregate.IsCountAggregate(col.function) && !ok { + d.metricToCountColumn[metricName] = newCol + } + } + + numberOfColumns := len(d.columns) + d.columns = append(d.columns, newColumns...) 
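+	// Register the newly created columns by name, continuing the existing index space
+	// (their positions start at the previous length of d.columns), and wire any
+	// non-concrete (virtual) column to the concrete columns it depends on.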
+ for i, col := range newColumns { + d.columnByName[col.GetColumnSpec().getColumnName()] = numberOfColumns + i + if !col.GetColumnSpec().isConcrete() { + fillDependantColumns(col, d) + } + } + d.metrics[metricName] = struct{}{} + return nil +} + +func (d *dataFrame) setDataAt(columnName string, index int, value interface{}) error { + colIndex, ok := d.columnByName[columnName] + if !ok { + return fmt.Errorf("no such column %v", columnName) + } + col := d.columns[colIndex] + err := col.SetDataAt(index, value) + if err == nil { + d.nonEmptyRowsIndicators[index] = true + } + + return err +} + +func (d *dataFrame) Len() int { + if d.isRawSeries { + return len(d.rawColumns) + } + return len(d.columns) +} + +func (d *dataFrame) Labels() utils.Labels { + return d.lset +} + +func (d *dataFrame) Names() []string { + names := make([]string, d.Len()) + + for i := 0; i < d.Len(); i++ { + names[i] = d.columns[i].Name() + } + + return names +} + +func (d *dataFrame) ColumnAt(i int) (Column, error) { + if i >= d.Len() { + return nil, fmt.Errorf("index %d out of bounds [0:%d]", i, d.Len()) + } + if d.shouldGenerateRawColumns() { + err := d.rawSeriesToColumns() + if err != nil { + return nil, err + } + } + return d.columns[i], nil +} + +func (d *dataFrame) Columns() ([]Column, error) { + if d.shouldGenerateRawColumns() { + err := d.rawSeriesToColumns() + if err != nil { + return nil, err + } + } + return d.columns, nil +} + +func (d *dataFrame) Column(name string) (Column, error) { + if d.shouldGenerateRawColumns() { + err := d.rawSeriesToColumns() + if err != nil { + return nil, err + } + } + i, ok := d.columnByName[name] + if !ok { + return nil, fmt.Errorf("column %q not found", name) + } + + return d.columns[i], nil +} + +func (d *dataFrame) Index() (Column, error) { + if d.shouldGenerateRawColumns() { + err := d.rawSeriesToColumns() + if err != nil { + return nil, err + } + } + return d.index, nil +} + +func (d *dataFrame) TimeSeries(i int) (utils.Series, error) { + if d.isRawSeries { + return d.rawColumns[i], nil + } + currentColumn, err := d.ColumnAt(i) + if err != nil { + return nil, err + } + + return NewDataFrameColumnSeries(d.index, + currentColumn, + d.metricToCountColumn[currentColumn.GetColumnSpec().metric], + d.Labels(), + d.hash, + d.showAggregateLabel), nil +} + +// Creates Frames.columns out of tsdb columns. +// First do all the concrete columns and then the virtual who are dependant on the concrete. +func (d *dataFrame) finishAllColumns() error { + // Marking as deleted every index (row) that has no data. + // Also, adding "blank" rows when needed to align all columns to the same time. + // Iterating backwards to not miss any deleted cell. 
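+	// Illustrative example (assumed data, for clarity only): with rows [t0, t1, t2] where
+	// only t1 received any value, t0 and t2 are deleted from every column and from the
+	// index, while a data/concrete column that is still missing a value at t1 is padded
+	// with NaN so all remaining rows stay aligned across columns.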
+ for i := len(d.nonEmptyRowsIndicators) - 1; i >= 0; i-- { + hasData := d.nonEmptyRowsIndicators[i] + if !hasData { + for _, col := range d.columns { + _ = col.Delete(i) + } + _ = d.index.Delete(i) + } else { + for _, col := range d.columns { + switch col.(type) { + case *ConcreteColumn, *dataColumn: + value, err := col.getBuilder().At(i) + if err != nil || value == nil { + err := col.getBuilder().Set(i, math.NaN()) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("could not create new column at index %d", i)) + } + } + } + } + } + } + + var columnSize int + var err error + for _, col := range d.columns { + switch col.(type) { + case *dataColumn: + err = col.finish() + case *ConcreteColumn: + err = col.finish() + if columnSize == 0 { + columnSize = col.FramesColumn().Len() + } else if columnSize != col.FramesColumn().Len() { + return fmt.Errorf("column length mismatch %v!=%v col=%v", columnSize, col.FramesColumn().Len(), col.Name()) + } + } + if err != nil { + return errors.Wrapf(err, "failed to create column '%v'", col.Name()) + } + } + for _, col := range d.columns { + switch newCol := col.(type) { + case *virtualColumn: + newCol.size = columnSize + err = col.finish() + } + if err != nil { + return errors.Wrapf(err, "failed to create column '%v'", col.Name()) + } + } + + err = d.index.finish() + + return err +} + +// Normalizing the raw data of different metrics to one timeline with both metric's times. +// +// for example the following time series: +// metric1 - (t0,v0), (t2, v1) +// metric2 - (t1,v2), (t2, v3) +// +// will be converted to: +// time metric1 metric2 +// t0 v0 NaN +// t1 NaN v2 +// t2 v1 v3 +// +func (d *dataFrame) rawSeriesToColumns() error { + var timeData []time.Time + var currentTime int64 + numberOfRawColumns := len(d.rawColumns) + columns := make([]frames.ColumnBuilder, numberOfRawColumns) + nonExhaustedIterators := numberOfRawColumns + seriesToDataType := make([]frames.DType, numberOfRawColumns) + seriesToDefaultValue := make([]interface{}, numberOfRawColumns) + nextTime := int64(math.MaxInt64) + seriesHasMoreData := make([]bool, numberOfRawColumns) + emptyMetrics := make(map[int]string) + + d.nullValuesMaps = make([]*pb.NullValuesMap, 0) + nullValuesRowIndex := -1 + + for i, rawSeries := range d.rawColumns { + if rawSeries == nil { + missingColumn := "(unknown column)" + for columnName, index := range d.columnByName { + if index == i { + missingColumn = columnName + break + } + } + emptyMetrics[i] = missingColumn + nonExhaustedIterators-- + continue + } + if rawSeries.Iterator().Next() { + seriesHasMoreData[i] = true + t, _ := rawSeries.Iterator().At() + if t < nextTime { + nextTime = t + } + } else { + nonExhaustedIterators-- + } + + currentEnc := chunkenc.EncXOR + if ser, ok := rawSeries.(*V3ioRawSeries); ok { + currentEnc = ser.encoding + } + + if currentEnc == chunkenc.EncVariant { + columns[i] = frames.NewSliceColumnBuilder(rawSeries.Labels().Get(config.PrometheusMetricNameAttribute), + frames.StringType, 0) + seriesToDataType[i] = frames.StringType + seriesToDefaultValue[i] = "" + } else { + columns[i] = frames.NewSliceColumnBuilder(rawSeries.Labels().Get(config.PrometheusMetricNameAttribute), + frames.FloatType, 0) + seriesToDataType[i] = frames.FloatType + seriesToDefaultValue[i] = math.NaN() + } + } + + for nonExhaustedIterators > 0 { + currentTime = nextTime + nextTime = int64(math.MaxInt64) + timeData = append(timeData, time.Unix(currentTime/1000, (currentTime%1000)*1e6)) + + // add new row to null values map + d.nullValuesMaps = 
append(d.nullValuesMaps, &pb.NullValuesMap{NullColumns: make(map[string]bool)}) + nullValuesRowIndex++ + + for seriesIndex, rawSeries := range d.rawColumns { + if rawSeries == nil { + continue + } + iter := rawSeries.Iterator() + + var v interface{} + var t int64 + + if seriesToDataType[seriesIndex] == frames.StringType { + t, v = iter.AtString() + } else { + t, v = iter.At() + } + + if t == currentTime { + e := columns[seriesIndex].Append(v) + if e != nil { + return errors.Wrap(e, fmt.Sprintf("could not append value %v", v)) + } + if iter.Next() { + t, _ = iter.At() + } else { + nonExhaustedIterators-- + seriesHasMoreData[seriesIndex] = false + } + } else { + e := columns[seriesIndex].Append(seriesToDefaultValue[seriesIndex]) + if e != nil { + return errors.Wrap(e, fmt.Sprintf("could not append from default value %v", seriesToDefaultValue[seriesIndex])) + } + d.nullValuesMaps[nullValuesRowIndex].NullColumns[columns[seriesIndex].Name()] = true + } + + if seriesHasMoreData[seriesIndex] && t < nextTime { + nextTime = t + } + } + } + + numberOfRows := len(timeData) + colSpec := columnMeta{metric: "time"} + d.index = newDataColumn("time", colSpec, numberOfRows, frames.TimeType) + e := d.index.SetData(timeData, numberOfRows) + if e != nil { + return errors.Wrap(e, fmt.Sprintf("could not set data, timeData=%v, numberOfRows=%v", timeData, numberOfRows)) + } + + d.columns = make([]Column, numberOfRawColumns) + + for i, series := range d.rawColumns { + if series == nil { + continue + } + + name := series.Labels().Get(config.PrometheusMetricNameAttribute) + spec := columnMeta{metric: name} + col := newDataColumn(name, spec, numberOfRows, seriesToDataType[i]) + col.framesCol = columns[i].Finish() + d.columns[i] = col + } + + if len(emptyMetrics) > 0 { + nullValues := make([]float64, numberOfRows) + for i := 0; i < numberOfRows; i++ { + nullValues[i] = math.NaN() + } + for index, metricName := range emptyMetrics { + spec := columnMeta{metric: metricName} + col := newDataColumn(metricName, spec, numberOfRows, frames.FloatType) + framesCol, err := frames.NewSliceColumn(metricName, nullValues) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("could not create empty column '%v'", metricName)) + } + col.framesCol = framesCol + d.columns[index] = col + + // mark empty columns + for i := 0; i < numberOfRows; i++ { + d.nullValuesMaps[i].NullColumns[col.name] = true + } + } + } + + d.isRawColumnsGenerated = true + + return nil +} + +func (d *dataFrame) shouldGenerateRawColumns() bool { return d.isRawSeries && !d.isRawColumnsGenerated } + +func (d *dataFrame) GetFrame() (frames.Frame, error) { + var framesColumns []frames.Column + if d.shouldGenerateRawColumns() { + err := d.rawSeriesToColumns() + if err != nil { + return nil, err + } + } + for _, col := range d.columns { + if !col.GetColumnSpec().isHidden { + framesColumns = append(framesColumns, col.FramesColumn()) + } + } + + return frames.NewFrameWithNullValues(framesColumns, []frames.Column{d.index.FramesColumn()}, d.Labels().Map(), d.nullValuesMaps) +} + +// Column object, store a single value or index column/array +// There can be data columns or calculated columns (e.g. Avg built from count & sum columns) + +// Column is a data column +type Column interface { + Len() int // Number of elements + Name() string // Column name + DType() frames.DType // Data type (e.g. IntType, FloatType ...) 
+ FloatAt(i int) (float64, error) // Float value at index i + StringAt(i int) (string, error) // String value at index i + TimeAt(i int) (time.Time, error) // time value at index i + GetColumnSpec() columnMeta // Get the column's metadata + SetDataAt(i int, value interface{}) error + SetData(d interface{}, size int) error + GetInterpolationFunction() InterpolationFunction + FramesColumn() frames.Column + Delete(index int) error + + setMetricName(name string) + getBuilder() frames.ColumnBuilder + finish() error +} + +type basicColumn struct { + name string + size int + spec columnMeta + interpolationFunction InterpolationFunction + builder frames.ColumnBuilder + framesCol frames.Column +} + +func (c *basicColumn) getBuilder() frames.ColumnBuilder { + return c.builder +} + +func (c *basicColumn) finish() error { + c.framesCol = c.builder.Finish() + return nil +} + +func (c *basicColumn) Delete(index int) error { + return c.builder.Delete(index) +} + +func (c *basicColumn) FramesColumn() frames.Column { + return c.framesCol +} + +// Name returns the column name +func (c *basicColumn) Name() string { + return c.name +} + +// Len returns the number of elements +func (c *basicColumn) Len() int { + if c.framesCol != nil { + return c.framesCol.Len() + } + return c.size +} + +func (c *basicColumn) isValidIndex(i int) bool { return i >= 0 && i < c.size } + +func (c *basicColumn) GetColumnSpec() columnMeta { return c.spec } + +func (c *basicColumn) setMetricName(name string) { + c.spec.metric = name + c.name = c.spec.getColumnName() +} + +func (c *basicColumn) SetDataAt(i int, value interface{}) error { + if !c.isValidIndex(i) { + return fmt.Errorf("index %d out of bounds [0:%d]", i, c.size) + } + return nil +} + +func (c *basicColumn) SetData(d interface{}, size int) error { + return errors.New("method not supported") +} +func (c *basicColumn) GetInterpolationFunction() InterpolationFunction { + return c.interpolationFunction +} + +func newDataColumn(name string, colSpec columnMeta, size int, datatype frames.DType) *dataColumn { + dc := &dataColumn{basicColumn: basicColumn{name: name, spec: colSpec, size: size, + interpolationFunction: GetInterpolateFunc(colSpec.interpolationType, colSpec.interpolationTolerance), + builder: frames.NewSliceColumnBuilder(name, datatype, size)}} + return dc + +} + +type dataColumn struct { + basicColumn +} + +// DType returns the data type +func (dc *dataColumn) DType() frames.DType { + return dc.framesCol.DType() +} + +// FloatAt returns float64 value at index i +func (dc *dataColumn) FloatAt(i int) (float64, error) { + return dc.framesCol.FloatAt(i) +} + +// StringAt returns string value at index i +func (dc *dataColumn) StringAt(i int) (string, error) { + return dc.framesCol.StringAt(i) +} + +// TimeAt returns time.Time value at index i +func (dc *dataColumn) TimeAt(i int) (time.Time, error) { + return dc.framesCol.TimeAt(i) +} + +func (dc *dataColumn) SetData(d interface{}, size int) error { + dc.size = size + var err error + dc.framesCol, err = frames.NewSliceColumn(dc.name, d) + return err +} + +func (dc *dataColumn) SetDataAt(i int, value interface{}) error { + if !dc.isValidIndex(i) { + return fmt.Errorf("index %d out of bounds [0:%d]", i, dc.size) + } + + var err error + switch value.(type) { + case float64: + // Update requested cell, only if not trying to override an existing value with NaN + prev, _ := dc.builder.At(i) + if !(math.IsNaN(value.(float64)) && prev != nil && !math.IsNaN(prev.(float64))) { + err = dc.builder.Set(i, value) + } + default: + err = 
dc.builder.Set(i, value) + } + return err +} + +func NewConcreteColumn(name string, colSpec columnMeta, size int, setFunc func(old, new interface{}) interface{}) *ConcreteColumn { + col := &ConcreteColumn{basicColumn: basicColumn{name: name, spec: colSpec, size: size, + interpolationFunction: GetInterpolateFunc(colSpec.interpolationType, colSpec.interpolationTolerance), + builder: frames.NewSliceColumnBuilder(name, frames.FloatType, size)}, setFunc: setFunc} + return col +} + +type ConcreteColumn struct { + basicColumn + setFunc func(old, new interface{}) interface{} +} + +func (c *ConcreteColumn) DType() frames.DType { + return c.framesCol.DType() +} +func (c *ConcreteColumn) FloatAt(i int) (float64, error) { + return c.framesCol.FloatAt(i) +} +func (c *ConcreteColumn) StringAt(i int) (string, error) { + return "", errors.New("aggregated column does not support string type") +} +func (c *ConcreteColumn) TimeAt(i int) (time.Time, error) { + return time.Unix(0, 0), errors.New("aggregated column does not support time type") +} +func (c *ConcreteColumn) SetDataAt(i int, val interface{}) error { + if !c.isValidIndex(i) { + return fmt.Errorf("index %d out of bounds [0:%d]", i, c.size) + } + value, _ := c.builder.At(i) + err := c.builder.Set(i, c.setFunc(value, val)) + return err +} + +func NewVirtualColumn(name string, colSpec columnMeta, size int, function func([]Column, int) (interface{}, error)) Column { + col := &virtualColumn{basicColumn: basicColumn{name: name, spec: colSpec, size: size, + interpolationFunction: GetInterpolateFunc(colSpec.interpolationType, colSpec.interpolationTolerance), + builder: frames.NewSliceColumnBuilder(name, frames.FloatType, size)}, + function: function} + return col +} + +type virtualColumn struct { + basicColumn + dependantColumns []Column + function func([]Column, int) (interface{}, error) +} + +func (c *virtualColumn) finish() error { + data := make([]float64, c.Len()) + var err error + for i := 0; i < c.Len(); i++ { + value, err := c.function(c.dependantColumns, i) + if err != nil { + return err + } + data[i] = value.(float64) + } + + c.framesCol, err = frames.NewSliceColumn(c.name, data) + if err != nil { + return err + } + return nil +} + +func (c *virtualColumn) DType() frames.DType { + return c.framesCol.DType() +} +func (c *virtualColumn) FloatAt(i int) (float64, error) { + return c.framesCol.FloatAt(i) +} +func (c *virtualColumn) StringAt(i int) (string, error) { + return c.framesCol.StringAt(i) +} +func (c *virtualColumn) TimeAt(i int) (time.Time, error) { + return time.Unix(0, 0), errors.New("aggregated column does not support time type") +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/interpolate.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/interpolate.go new file mode 100644 index 00000000..014213c6 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/interpolate.go @@ -0,0 +1,120 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. 
See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package pquerier + +import ( + "fmt" + "math" + "strings" +) + +type InterpolationType uint8 + +func (it InterpolationType) String() string { + switch it { + case interpolateNone: + return "none" + case interpolateNaN: + return "nan" + case interpolatePrev: + return "prev_val" + case interpolateNext: + return "next_val" + case interpolateLinear: + return "linear" + default: + return "unknown" + } +} + +const ( + interpolateNone InterpolationType = 0 + interpolateNaN InterpolationType = 1 + interpolatePrev InterpolationType = 2 + interpolateNext InterpolationType = 3 + interpolateLinear InterpolationType = 4 + defaultInterpolation InterpolationType = interpolateNext +) + +type InterpolationFunction func(tprev, tnext, tseek int64, vprev, vnext float64) (int64, float64) + +func StrToInterpolateType(str string) (InterpolationType, error) { + switch strings.ToLower(str) { + case "none", "": + return interpolateNone, nil + case "nan": + return interpolateNaN, nil + case "prev_val": + return interpolatePrev, nil + case "next_val": + return interpolateNext, nil + case "lin", "linear": + return interpolateLinear, nil + } + return 0, fmt.Errorf("unknown/unsupported interpulation function %s", str) +} + +// return line interpolation function, estimate seek value based on previous and next points +func GetInterpolateFunc(alg InterpolationType, tolerance int64) InterpolationFunction { + switch alg { + case interpolateNaN: + return func(tprev, tnext, tseek int64, vprev, vnext float64) (int64, float64) { + return tseek, math.NaN() + } + case interpolatePrev: + return func(tprev, tnext, tseek int64, vprev, vnext float64) (int64, float64) { + if absoluteDiff(tseek, tprev) > tolerance { + return 0, 0 + } + return tseek, vprev + } + case interpolateNext: + return func(tprev, tnext, tseek int64, vprev, vnext float64) (int64, float64) { + if absoluteDiff(tnext, tseek) > tolerance { + return 0, 0 + } + return tseek, vnext + } + case interpolateLinear: + return func(tprev, tnext, tseek int64, vprev, vnext float64) (int64, float64) { + if (absoluteDiff(tseek, tprev) > tolerance) || absoluteDiff(tnext, tseek) > tolerance { + return 0, 0 + } + if math.IsNaN(vprev) || math.IsNaN(vnext) { + return tseek, math.NaN() + } + v := vprev + (vnext-vprev)*float64(tseek-tprev)/float64(tnext-tprev) + return tseek, v + } + default: + // None interpolation + return func(tprev, tnext, tseek int64, vprev, vnext float64) (int64, float64) { + return tnext, vnext + } + } +} + +func absoluteDiff(a, b int64) int64 { + if a > b { + return a - b + } + return b - a +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/interpolate_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/interpolate_test.go new file mode 100644 index 00000000..8f082fe0 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/interpolate_test.go @@ -0,0 +1,86 @@ +// +build unit + +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. 
You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package pquerier + +import ( + "math" + "testing" + + "github.com/stretchr/testify/suite" +) + +type testInterpolationSuite struct { + suite.Suite +} + +func (suite *testInterpolationSuite) TestNone() { + fntype, err := StrToInterpolateType("") + suite.Require().Nil(err) + fn := GetInterpolateFunc(fntype, math.MaxInt64) + t, v := fn(10, 110, 60, 100, 200) + suite.Require().Equal(t, int64(110)) + suite.Require().Equal(v, 200.0) +} + +func (suite *testInterpolationSuite) TestNaN() { + fntype, err := StrToInterpolateType("nan") + suite.Require().Nil(err) + fn := GetInterpolateFunc(fntype, math.MaxInt64) + t, v := fn(10, 110, 60, 100, 200) + suite.Require().Equal(t, int64(60)) + suite.Require().Equal(math.IsNaN(v), true) +} + +func (suite *testInterpolationSuite) TestPrev() { + fntype, err := StrToInterpolateType("prev_val") + suite.Require().Nil(err) + fn := GetInterpolateFunc(fntype, math.MaxInt64) + t, v := fn(10, 110, 60, 100, 200) + suite.Require().Equal(t, int64(60)) + suite.Require().Equal(v, 100.0) +} + +func (suite *testInterpolationSuite) TestNext() { + fntype, err := StrToInterpolateType("next_val") + suite.Require().Nil(err) + fn := GetInterpolateFunc(fntype, math.MaxInt64) + t, v := fn(10, 110, 60, 100, 200) + suite.Require().Equal(t, int64(60)) + suite.Require().Equal(v, 200.0) +} + +func (suite *testInterpolationSuite) TestLin() { + fntype, err := StrToInterpolateType("lin") + suite.Require().Nil(err) + fn := GetInterpolateFunc(fntype, math.MaxInt64) + t, v := fn(10, 110, 60, 100, 200) + suite.Require().Equal(t, int64(60)) + suite.Require().Equal(v, 150.0) + t, v = fn(10, 110, 60, 100, math.NaN()) + suite.Require().Equal(t, int64(60)) + suite.Require().Equal(math.IsNaN(v), true) +} + +func TestInterpolationSuite(t *testing.T) { + suite.Run(t, new(testInterpolationSuite)) +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/client_aggregates_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/client_aggregates_integration_test.go new file mode 100644 index 00000000..45b8724e --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/client_aggregates_integration_test.go @@ -0,0 +1,708 @@ +// +build integration + +package pqueriertest + +import ( + "math" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testClientAggregatesSuite struct { + basicQueryTestSuite +} + +func TestClientAggregatesSuite(t *testing.T) { + suite.Run(t, new(testClientAggregatesSuite)) +} + +func (suite *testClientAggregatesSuite) 
TestQueryAggregateWithNameWildcard() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestData}, + tsdbtest.Metric{ + Name: "diskio", + Labels: labels1, + Data: ingestData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + expectedData := map[string][]tsdbtest.DataPoint{ + "sum": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 10}, {Time: suite.basicQueryTime, Value: 20}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 70}}, + "min": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 10}, {Time: suite.basicQueryTime, Value: 20}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 30}}, + "max": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 10}, {Time: suite.basicQueryTime, Value: 20}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 40}}} + expected := map[string]map[string][]tsdbtest.DataPoint{"cpu": expectedData, "diskio": expectedData} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Functions: "max,min,sum", Step: 2 * tsdbtest.MinuteInMillis, + From: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + metricName := set.At().Labels().Get(config.PrometheusMetricNameAttribute) + aggr := set.At().Labels().Get(aggregate.AggregateLabel) + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareMultipleMetrics(data, expected, metricName, aggr) + } + + assert.Equal(suite.T(), len(expectedData)*len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestQueryAggregateWithFilterOnMetricName() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestData}, + tsdbtest.Metric{ + Name: "diskio", + Labels: labels1, + Data: ingestData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + expectedData := map[string][]tsdbtest.DataPoint{"max": {{Time: suite.basicQueryTime, Value: 20}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 40}}} + expected := map[string]map[string][]tsdbtest.DataPoint{"cpu": expectedData} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Functions: "max", Step: 2 * tsdbtest.MinuteInMillis, + From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval), Filter: "_name=='cpu'"} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + metricName := set.At().Labels().Get(config.PrometheusMetricNameAttribute) + aggr := set.At().Labels().Get(aggregate.AggregateLabel) + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareMultipleMetrics(data, expected, metricName, aggr) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestClientAggregatesSinglePartition() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": {{Time: suite.basicQueryTime, Value: 30}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 70}}, + "min": {{Time: suite.basicQueryTime, Value: 10}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 30}}, + "max": {{Time: suite.basicQueryTime, Value: 20}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 40}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", Functions: "sum,max,min", Step: 2 * 60 * 1000, From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestClientAggregatesMultiPartition() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Labels: labels1, + Name: "cpu", + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 10}, {Time: suite.basicQueryTime, Value: 90}}, + "min": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 10}, {Time: suite.basicQueryTime, Value: 20}}, + "max": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 10}, {Time: suite.basicQueryTime, Value: 40}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum,max,min", + Step: 5 * tsdbtest.MinuteInMillis, + From: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, + To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestClientAggregatesMultiPartitionNonConcreteAggregates() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {suite.basicQueryTime - 7*tsdbtest.DaysInMillis + tsdbtest.MinuteInMillis, 12}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"avg": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 11}, {Time: suite.basicQueryTime, Value: 30}}, + "stdvar": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 2}, {Time: suite.basicQueryTime, Value: 100}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "avg,stdvar", + Step: 5 * tsdbtest.MinuteInMillis, + From: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, + To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestClientAggregatesMultiPartitionOneStep() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 25*tsdbtest.DaysInMillis, 10}, + {suite.basicQueryTime - 20*tsdbtest.DaysInMillis, 20}, + {suite.basicQueryTime - 12*tsdbtest.DaysInMillis, 30}, + {suite.basicQueryTime - 1*tsdbtest.DaysInMillis, 40}, + {suite.basicQueryTime + 20*tsdbtest.DaysInMillis, 50}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"count": {{Time: suite.basicQueryTime - 25*tsdbtest.DaysInMillis, Value: 5}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "count", + Step: 0, + From: suite.basicQueryTime - 25*tsdbtest.DaysInMillis, + To: suite.basicQueryTime + 21*tsdbtest.DaysInMillis} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestGetEmptyResponse() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", Functions: "sum,max,min,sqr", + Step: 1 * 60 * 60 * 1000, + From: suite.basicQueryTime - 10*tsdbtest.DaysInMillis, + To: suite.basicQueryTime - 8*tsdbtest.DaysInMillis} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + } + + assert.Equal(suite.T(), 0, seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestSelectAggregatesByRequestedColumns() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": {{Time: suite.basicQueryTime, Value: 30}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 70}}, + "min": {{Time: suite.basicQueryTime, Value: 10}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 30}}, + "max": {{Time: suite.basicQueryTime, Value: 20}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 40}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "cpu", Function: "max"}, {Metric: "cpu", Function: "min"}, {Metric: "cpu", Function: "sum"}}, + Step: 2 * 60 * 1000, From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestSelectAggregatesAndRawByRequestedColumns() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": {{Time: suite.basicQueryTime, Value: 30}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 70}}, + "": {{suite.basicQueryTime, 10}, {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "cpu", Function: "sum"}, {Metric: "cpu"}}, + Step: 2 * 60 * 1000, From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestQueryAllData() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": {{Time: suite.basicQueryTime, Value: 30}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 70}}, + "min": {{Time: suite.basicQueryTime, Value: 10}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 30}}, + "max": {{Time: suite.basicQueryTime, Value: 20}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 40}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum,max,min", + Step: 2 * 60 * 1000, + From: 0, + To: math.MaxInt64} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestAggregatesWithZeroStep() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{ + "max": {{Time: suite.basicQueryTime, Value: 40}}, + "min": {{Time: suite.basicQueryTime, Value: 10}}, + "sum": {{Time: suite.basicQueryTime, Value: 100}}, + "count": {{Time: suite.basicQueryTime, Value: 4}}, + } + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", Functions: "max, sum,count,min", Step: 0, From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + for i, dataPoint := range expected[agg] { + suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") + } + } + + assert.Equal(suite.T(), 4, seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestUsePreciseAggregationsConfig() { + suite.v3ioConfig.UsePreciseAggregations = true + defer func() { suite.v3ioConfig.UsePreciseAggregations = false }() + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.NoError(err, "failed to create v3io adapter.") + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": {{Time: suite.basicQueryTime, Value: 100}}, + "min": {{Time: suite.basicQueryTime, Value: 10}}, + "max": {{Time: suite.basicQueryTime, Value: 40}}} + + querierV2, err := adapter.QuerierV2() + suite.NoError(err, "failed to create querier v2.") + + params := &pquerier.SelectParams{Name: "cpu", Functions: "sum,max,min", Step: 1 * 60 * 60 * 1000, From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + suite.NoError(err, "failed to exeute query,") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + 
} + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + suite.Require().Equal(3, seriesCount, "series count didn't match expected") +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/cross_series_aggregation_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/cross_series_aggregation_integration_test.go new file mode 100644 index 00000000..624ec921 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/cross_series_aggregation_integration_test.go @@ -0,0 +1,770 @@ +// +build integration + +package pqueriertest + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testCrossSeriesAggregatesSuite struct { + basicQueryTestSuite +} + +func TestCrossSeriesAggregatesSuite(t *testing.T) { + suite.Run(t, new(testCrossSeriesAggregatesSuite)) +} + +func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesTimesFallsOnStep() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 30}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime, 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{ + "sum": {{Time: suite.basicQueryTime, Value: 30}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 50}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 70}}, + "min": {{Time: suite.basicQueryTime, Value: 10}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 20}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 30}}, + "avg": {{Time: suite.basicQueryTime, Value: 15}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 25}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 35}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum_all,min_all,avg_all", + Step: 2 * 60 * 1000, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis} + set, err := querierV2.Select(params) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + 
suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregates() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 30}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime, 20}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{ + "sum": {{Time: suite.basicQueryTime, Value: 30}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 50}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 70}}, + "min": {{Time: suite.basicQueryTime, Value: 10}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 20}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 30}}, + "avg": {{Time: suite.basicQueryTime, Value: 15}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 25}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 35}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum_all,min_all,avg_all", + Step: 2 * 60 * 1000, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis} + set, err := querierV2.Select(params) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesMultiPartition() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime, 20}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 60}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 20}, + {suite.basicQueryTime - 
7*tsdbtest.DaysInMillis + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime, 30}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{ + "max": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 20}, + {Time: suite.basicQueryTime - 4*tsdbtest.MinuteInMillis, Value: 30}, + {Time: suite.basicQueryTime - 2*tsdbtest.MinuteInMillis, Value: 30}, + {Time: suite.basicQueryTime, Value: 30}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 60}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "max_all", + Step: 2 * 60 * 1000, + From: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, + To: suite.basicQueryTime + 3*tsdbtest.MinuteInMillis} + set, err := querierV2.Select(params) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesWithInterpolation() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, 40}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime, 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{ + "sum": {{Time: suite.basicQueryTime, Value: 30}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 50}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 70}}, + "min": {{Time: suite.basicQueryTime, Value: 10}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 20}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 30}}, + "max": {{Time: suite.basicQueryTime, Value: 20}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 30}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 40}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create 
querier v2") + + selectParams, _, err := pquerier.ParseQuery("select sum_all(prev_val(cpu)), min_all(prev_val(cpu)), max_all(prev_val(cpu))") + suite.NoError(err) + selectParams.Step = 2 * tsdbtest.MinuteInMillis + selectParams.From = suite.basicQueryTime + selectParams.To = suite.basicQueryTime + 5*tsdbtest.MinuteInMillis + set, err := querierV2.Select(selectParams) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesMultiPartitionExactlyOnStep() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime, 20}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 60}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 20}, + {suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime, 30}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{ + "sum": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 30}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 2*tsdbtest.MinuteInMillis, Value: 2}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 4*tsdbtest.MinuteInMillis, Value: 2}, + {Time: suite.basicQueryTime, Value: 50}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 100}}, + "min": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 10}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 2*tsdbtest.MinuteInMillis, Value: 1}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 4*tsdbtest.MinuteInMillis, Value: 1}, + {Time: suite.basicQueryTime, Value: 20}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 40}}, + "avg": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 15}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 2*tsdbtest.MinuteInMillis, Value: 1}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 4*tsdbtest.MinuteInMillis, Value: 1}, + {Time: suite.basicQueryTime, Value: 25}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 50}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + selectParams, _, err := pquerier.ParseQuery("select sum_all(prev_val(cpu)), 
min_all(prev_val(cpu)),avg_all(prev_val(cpu))") + suite.NoError(err) + selectParams.Step = 2 * tsdbtest.MinuteInMillis + selectParams.From = suite.basicQueryTime - 7*tsdbtest.DaysInMillis + selectParams.To = suite.basicQueryTime + 3*tsdbtest.MinuteInMillis + set, err := querierV2.Select(selectParams) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesMultiPartitionWithInterpolation() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 3*tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime, 20}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 60}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 20}, + {suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 2*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime, 30}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{ + "sum": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 30}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 2*tsdbtest.MinuteInMillis, Value: 2}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 4*tsdbtest.MinuteInMillis, Value: 21}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 6*tsdbtest.MinuteInMillis, Value: 21}, + {Time: suite.basicQueryTime, Value: 50}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 61}}, + "count": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 2}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 2*tsdbtest.MinuteInMillis, Value: 2}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 4*tsdbtest.MinuteInMillis, Value: 2}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 6*tsdbtest.MinuteInMillis, Value: 2}, + {Time: suite.basicQueryTime, Value: 2}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 2}}, + "min": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 10}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 2*tsdbtest.MinuteInMillis, Value: 1}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 4*tsdbtest.MinuteInMillis, Value: 1}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 6*tsdbtest.MinuteInMillis, Value: 1}, + 
{Time: suite.basicQueryTime, Value: 20}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 1}}, + "avg": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 15}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 2*tsdbtest.MinuteInMillis, Value: 1}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 4*tsdbtest.MinuteInMillis, Value: 10.5}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 6*tsdbtest.MinuteInMillis, Value: 10.5}, + {Time: suite.basicQueryTime, Value: 25}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 30.5}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + selectParams, _, err := pquerier.ParseQuery("select sum_all(prev_val(cpu)), min_all(prev_val(cpu)),avg_all(prev_val(cpu)),count_all(prev_val(cpu))") + suite.NoError(err) + selectParams.Step = 2 * tsdbtest.MinuteInMillis + selectParams.From = suite.basicQueryTime - 7*tsdbtest.DaysInMillis + selectParams.To = suite.basicQueryTime + 3*tsdbtest.MinuteInMillis + set, err := querierV2.Select(selectParams) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesWithInterpolationOverTolerance() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 10*tsdbtest.MinuteInMillis, 30}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime, 20}, + {suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 10*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{ + "sum": {{Time: suite.basicQueryTime, Value: 30}, + {Time: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, Value: 30}, + {Time: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis, Value: 70}}, + "min": {{Time: suite.basicQueryTime, Value: 10}, + {Time: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, Value: 30}, + {Time: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis, Value: 30}}, + "max": {{Time: suite.basicQueryTime, Value: 20}, + {Time: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, Value: 30}, + {Time: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis, Value: 40}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + selectParams, _, err := pquerier.ParseQuery("select sum_all(prev_val(cpu)), min_all(prev_val(cpu)), max_all(prev_val(cpu))") + suite.NoError(err) + selectParams.Step = 5 * 
tsdbtest.MinuteInMillis + selectParams.From = suite.basicQueryTime + selectParams.To = suite.basicQueryTime + 10*tsdbtest.MinuteInMillis + for i := 0; i < len(selectParams.RequestedColumns); i++ { + selectParams.RequestedColumns[i].InterpolationTolerance = tsdbtest.MinuteInMillis + } + set, err := querierV2.Select(selectParams) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesSinglePartition() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime, 20}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": {{Time: suite.basicQueryTime, Value: 30}}, + "min": {{Time: suite.basicQueryTime, Value: 10}}, + "max": {{Time: suite.basicQueryTime, Value: 20}}, + "count": {{Time: suite.basicQueryTime, Value: 2}}, + "avg": {{Time: suite.basicQueryTime, Value: 15}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum_all,min_all,max_all,count_all,avg_all", + Step: 2 * 60 * 1000, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 1*tsdbtest.MinuteInMillis} + set, err := querierV2.Select(params) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testCrossSeriesAggregatesSuite) TestOnlyVirtualCrossSeriesAggregateWithInterpolation() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, 20}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime, 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, 20}} + testParams := tsdbtest.NewTestParams(suite.T(), + 
tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{ + "avg": {{Time: suite.basicQueryTime, Value: 15}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 1}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 10.5}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + selectParams, _, err := pquerier.ParseQuery("select avg_all(prev_val(cpu))") + suite.NoError(err) + selectParams.Step = 2 * tsdbtest.MinuteInMillis + selectParams.From = suite.basicQueryTime + selectParams.To = suite.basicQueryTime + 5*tsdbtest.MinuteInMillis + set, err := querierV2.Select(selectParams) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesSameLabelMultipleMetrics() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 30}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime, 20}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "disk", + Labels: labels1, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{ + "sum-cpu": {{Time: suite.basicQueryTime, Value: 10}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 20}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 30}}, + "sum-disk": {{Time: suite.basicQueryTime, Value: 20}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 30}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 40}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + params := &pquerier.SelectParams{Name: "cpu, disk", + Functions: "sum_all", + Step: 2 * 60 * 1000, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis} + set, err := querierV2.Select(params) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + 
data, err := tsdbtest.IteratorToSlice(iter) + suite.NoError(err) + + agg := set.At().Labels().Get(aggregate.AggregateLabel) + suite.NoError(err) + + metricName := set.At().Labels().Get(config.PrometheusMetricNameAttribute) + suite.NoError(err) + + suite.compareSingleMetricWithAggregator(data, expected, fmt.Sprintf("%v-%v", agg, metricName)) + } + + suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesDifferentLabelMultipleMetrics() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "darwin") + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 30}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime, 20}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData2}, + tsdbtest.Metric{ + Name: "disk", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "disk", + Labels: labels2, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := []tsdbtest.DataPoint{ + {Time: suite.basicQueryTime, Value: 30}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 50}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 70}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + params := &pquerier.SelectParams{Name: "cpu, disk", + Functions: "sum_all", + Step: 2 * 60 * 1000, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis} + set, err := querierV2.Select(params) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + suite.NoError(err) + + suite.compareSingleMetric(data, expected) + } + + suite.Require().Equal(2, seriesCount, "series count didn't match expected") +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/dataframe_query_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/dataframe_query_integration_test.go new file mode 100644 index 00000000..19e81d08 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/dataframe_query_integration_test.go @@ -0,0 +1,1321 @@ +// +build integration + +package pqueriertest + +import ( + "errors" + "fmt" + "math" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "github.com/v3io/frames" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + 
"github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testSelectDataframeSuite struct { + basicQueryTestSuite +} + +func TestSelectDataframeSuite(t *testing.T) { + suite.Run(t, new(testSelectDataframeSuite)) +} + +func (suite *testSelectDataframeSuite) TestAggregatesWithZeroStepSelectDataframe() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string]tsdbtest.DataPoint{"max": {Time: suite.basicQueryTime, Value: 40}, + "min": {Time: suite.basicQueryTime, Value: 10}, + "sum": {Time: suite.basicQueryTime, Value: 100}, + "count": {Time: suite.basicQueryTime, Value: 4}, + } + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", Functions: "max, sum,count,min", Step: 0, From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.SelectDataFrame(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.NextFrame() { + seriesCount++ + frame, err := set.GetFrame() + suite.NoError(err) + + indexCol := frame.Indices()[0] + assert.Equal(suite.T(), 1, indexCol.Len()) + t, err := indexCol.TimeAt(0) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), suite.basicQueryTime, t.UnixNano()/int64(time.Millisecond)) + + for _, colName := range frame.Names() { + col, err := frame.Column(colName) + suite.NoError(err) + suite.Require().Equal(1, col.Len()) + currentColAggregate := strings.Split(col.Name(), "(")[0] + f, err := col.FloatAt(0) + assert.NoError(suite.T(), err) + + var expectedFloat float64 + switch val := expected[currentColAggregate].Value.(type) { + case int: + expectedFloat = float64(val) + case float64: + expectedFloat = val + default: + suite.Failf("invalid data type", "expected int or float, actual type is %t", val) + } + suite.Require().Equal(expectedFloat, f) + } + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testSelectDataframeSuite) TestEmptyRawDataSelectDataframe() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", From: suite.basicQueryTime - 10*tsdbtest.MinuteInMillis, To: suite.basicQueryTime - 1*tsdbtest.MinuteInMillis} + set, err := querierV2.SelectDataFrame(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.NextFrame() { + seriesCount++ + frame, err := set.GetFrame() + suite.NoError(err) + + suite.Require().Equal(0, frame.Indices()[0].Len()) + + for _, colName := range frame.Names() { + col, _ := frame.Column(colName) + assert.Equal(suite.T(), 0, col.Len()) + } + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testSelectDataframeSuite) Test2Series1EmptySelectDataframe() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "diskio", + Labels: labels1, + Data: []tsdbtest.DataPoint{{suite.basicQueryTime + 10*tsdbtest.MinuteInMillis, 10}}}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"cpu": ingestedData, + "diskio": {{suite.basicQueryTime, math.NaN()}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), math.NaN()}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, math.NaN()}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, math.NaN()}}, + } + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params, _, _ := pquerier.ParseQuery("select cpu,diskio") + params.From = suite.basicQueryTime + params.To = suite.basicQueryTime + 4*tsdbtest.MinuteInMillis + + set, err := querierV2.SelectDataFrame(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.NextFrame() { + seriesCount++ + frame, err := set.GetFrame() + suite.NoError(err) + + indexCol := frame.Indices()[0] + assert.Equal(suite.T(), len(ingestedData), indexCol.Len()) + for i := 0; i < indexCol.Len(); i++ { + t, err := indexCol.TimeAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), ingestedData[i].Time, t.UnixNano()/int64(time.Millisecond)) + } + + for _, colName := range frame.Names() { + col, err := 
frame.Column(colName) + suite.NoError(err) + assert.Equal(suite.T(), len(ingestedData), col.Len()) + for i := 0; i < col.Len(); i++ { + currentExpected := expected[col.Name()][i].Value + switch val := currentExpected.(type) { + case float64: + fv, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + if !(math.IsNaN(val) && math.IsNaN(fv)) { + assert.Equal(suite.T(), currentExpected, fv) + } + case int: + iv, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), float64(val), iv) + case string: + sv, err := col.StringAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), val, sv) + default: + assert.Error(suite.T(), errors.New("unsupported data type")) + } + } + } + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testSelectDataframeSuite) TestStringAndFloatMetricsDataframe() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.NoError(err, "failed to create v3io adapter") + + metricName1 := "cpu" + metricName2 := "log" + labels := utils.LabelsFromStringList("os", "linux") + labelsWithName := append(labels, utils.LabelsFromStringList("__name__", metricName2)...) + + expectedTimeColumn := []int64{suite.basicQueryTime, suite.basicQueryTime + tsdbtest.MinuteInMillis, suite.basicQueryTime + 2*tsdbtest.MinuteInMillis} + logData := []interface{}{"a", "b", "c"} + expectedColumns := map[string][]interface{}{metricName1: {10.0, 20.0, 30.0}, + metricName2: logData} + appender, err := adapter.Appender() + suite.NoError(err, "failed to create v3io appender") + + ref, err := appender.Add(labelsWithName, expectedTimeColumn[0], logData[0]) + suite.NoError(err, "failed to add data to the TSDB appender") + for i := 1; i < len(expectedTimeColumn); i++ { + appender.AddFast(labels, ref, expectedTimeColumn[i], logData[i]) + } + + _, err = appender.WaitForCompletion(0) + suite.NoError(err, "failed to wait for TSDB append completion") + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: metricName1, + Labels: labels, + Data: []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}}}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + suite.NoError(err, "failed to create querier") + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: metricName1}, {Metric: metricName2}}, + From: suite.basicQueryTime, To: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis} + iter, err := querierV2.SelectDataFrame(params) + suite.NoError(err, "failed to execute query") + + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + suite.NoError(err) + indexCol := frame.Indices()[0] + + for i := 0; i < indexCol.Len(); i++ { + t, _ := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + suite.Require().Equal(expectedTimeColumn[i], timeMillis, "time column does not match at index %v", i) + for _, columnName := range frame.Names() { + var v interface{} + + column, err := frame.Column(columnName) + suite.NoError(err) + if column.DType() == frames.FloatType { + v, _ = column.FloatAt(i) + } else if column.DType() == frames.StringType { + v, _ = column.StringAt(i) + } else { + suite.Fail(fmt.Sprintf("column type is not as expected: %v", column.DType())) + } + + 
suite.Require().Equal(expectedColumns[column.Name()][i], v, "column %v does not match at index %v", column.Name(), i) + } + } + } +} + +func (suite *testSelectDataframeSuite) TestQueryDataFrameMultipleMetricsWithMultipleLabelSets() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + metricName1 := "cpu" + metricName2 := "diskio" + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + numberOfEvents := 5 + eventsInterval := int64(tsdbtest.MinuteInMillis) + ingestData1 := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + ingestData2 := []tsdbtest.DataPoint{{suite.basicQueryTime + tsdbtest.MinuteInMillis, 20}} + ingestData3 := []tsdbtest.DataPoint{{suite.basicQueryTime, 30}, + {suite.basicQueryTime + tsdbtest.MinuteInMillis, 40}} + + expectedData := map[string][]tsdbtest.DataPoint{ + fmt.Sprintf("%v-%v", metricName1, "linux"): {{suite.basicQueryTime, 10}, {suite.basicQueryTime + tsdbtest.MinuteInMillis, math.NaN()}}, + fmt.Sprintf("%v-%v", metricName2, "linux"): {{suite.basicQueryTime, math.NaN()}, {suite.basicQueryTime + tsdbtest.MinuteInMillis, 20}}, + fmt.Sprintf("%v-%v", metricName2, "mac"): ingestData3} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: metricName1, + Labels: labels1, + Data: ingestData1}, + tsdbtest.Metric{ + Name: metricName2, + Labels: labels1, + Data: ingestData2}, + tsdbtest.Metric{ + Name: metricName2, + Labels: labels2, + Data: ingestData3}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Filter: "1==1", + From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents)*eventsInterval} + set, err := querierV2.SelectDataFrame(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.NextFrame() { + seriesCount++ + frame, err := set.GetFrame() + suite.NoError(err) + + indexCol := frame.Indices()[0] + assert.Equal(suite.T(), 2, indexCol.Len()) + for i := 0; i < indexCol.Len(); i++ { + t, err := indexCol.TimeAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), expectedData[fmt.Sprintf("%v-%v", metricName1, "linux")][i].Time, t.UnixNano()/int64(time.Millisecond)) + + for _, colName := range frame.Names() { + col, err := frame.Column(colName) + suite.NoError(err) + currentExpectedData := expectedData[fmt.Sprintf("%v-%v", col.Name(), frame.Labels()["os"])] + assert.Equal(suite.T(), len(currentExpectedData), col.Len()) + currentExpected := currentExpectedData[i].Value + + switch val := currentExpected.(type) { + case float64: + f, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + if !(math.IsNaN(val) && math.IsNaN(f)) { + assert.Equal(suite.T(), currentExpected, f) + } + case int: + iv, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), float64(val), iv) + case string: + s, err := col.StringAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), val, s) + default: + assert.Error(suite.T(), errors.New("unsupported data type")) + } + } + } + } + + assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") +} + +func (suite *testSelectDataframeSuite) 
TestSelectDataframeAggregationsMetricsHaveBigGaps() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + ingestedData1 := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime - 4*tsdbtest.DaysInMillis), 20}} + + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime - 1*tsdbtest.DaysInMillis, 30}} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu1", + Labels: labels1, + Data: ingestedData1}, + tsdbtest.Metric{ + Name: "cpu2", + Labels: labels1, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expectedTime := []int64{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, suite.basicQueryTime - 4*tsdbtest.DaysInMillis, suite.basicQueryTime - 1*tsdbtest.DaysInMillis} + expected := map[string][]float64{"count(cpu1)": {1, 1, math.NaN()}, + "count(cpu2)": {math.NaN(), math.NaN(), 1}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{ + Functions: "count", + Step: int64(tsdbtest.MinuteInMillis), + From: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, + To: suite.basicQueryTime} + set, err := querierV2.SelectDataFrame(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var dataFrameCount int + for set.NextFrame() { + dataFrameCount++ + frame, err := set.GetFrame() + suite.Require().NoError(err) + suite.Require().Equal(len(expected), len(frame.Names()), "number of columns in frame does not match") + suite.Require().Equal(len(expectedTime), frame.Indices()[0].Len(), "columns size is not as expected") + + indexCol := frame.Indices()[0] + + for i := 0; i < len(expected); i++ { + t, err := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + suite.Require().NoError(err) + suite.Require().Equal(expectedTime[i], timeMillis) + + for _, currName := range frame.Names() { + currCol, err := frame.Column(currName) + suite.Require().NoError(err) + currVal, err := currCol.FloatAt(i) + + suite.Require().NoError(err) + if !(math.IsNaN(currVal) && math.IsNaN(expected[currName][i])) { + suite.Require().Equal(expected[currName][i], currVal) + } + } + } + } + + suite.Require().Equal(1, dataFrameCount, "series count didn't match expected") +} + +func (suite *testSelectDataframeSuite) TestSelectDataframeDaownsampleMetricsHaveBigGaps() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + ingestedData1 := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime - 4*tsdbtest.DaysInMillis), 20}} + + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime - 1*tsdbtest.DaysInMillis, 30}} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu1", + Labels: labels1, + Data: ingestedData1}, + tsdbtest.Metric{ + Name: "cpu2", + Labels: labels1, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expectedTime := []int64{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, + suite.basicQueryTime - 4*tsdbtest.DaysInMillis - 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime - 4*tsdbtest.DaysInMillis - 1*tsdbtest.MinuteInMillis, + suite.basicQueryTime - 4*tsdbtest.DaysInMillis, + suite.basicQueryTime - 1*tsdbtest.DaysInMillis - 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime - 1*tsdbtest.DaysInMillis - 1*tsdbtest.MinuteInMillis, + suite.basicQueryTime - 1*tsdbtest.DaysInMillis} + expected := map[string][]float64{"cpu1": {10, 20, 20, 20, math.NaN(), math.NaN(), math.NaN()}, + "cpu2": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), 30, 30, 30}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{ + Step: int64(tsdbtest.MinuteInMillis), + From: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, + To: suite.basicQueryTime} + set, err := querierV2.SelectDataFrame(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var dataFrameCount int + for set.NextFrame() { + dataFrameCount++ + frame, err := set.GetFrame() + suite.Require().NoError(err) + suite.Require().Equal(len(expected), len(frame.Names()), "number of columns in frame does not match") + suite.Require().Equal(len(expectedTime), frame.Indices()[0].Len(), "columns size is not as expected") + + indexCol := frame.Indices()[0] + + for i := 0; i < len(expected); i++ { + t, err := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + suite.Require().NoError(err) + suite.Require().Equal(expectedTime[i], timeMillis) + + for _, currName := range frame.Names() { + currCol, err := frame.Column(currName) + suite.Require().NoError(err) + currVal, err := currCol.FloatAt(i) + + suite.Require().NoError(err) + if !(math.IsNaN(currVal) && math.IsNaN(expected[currName][i])) { + suite.Require().Equal(expected[currName][i], currVal) + } + } + } + } + + suite.Require().Equal(1, dataFrameCount, "series count didn't match expected") +} + +func (suite *testSelectDataframeSuite) TestQueryDataFrameMultipleMetrics() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.NoError(err, "failed to create v3io adapter") + + metricName1 := "cpu" + metricName2 := "diskio" + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 5 + eventsInterval := int64(tsdbtest.MinuteInMillis) + ingestData1 := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 15}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 18}} + ingestData2 := []tsdbtest.DataPoint{{suite.basicQueryTime + tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 22}, + {suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, 26}} + + expectedData := 
map[string][]tsdbtest.DataPoint{ + metricName1: {{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, math.NaN()}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 15}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 18}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, math.NaN()}, + {suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, math.NaN()}}, + metricName2: {{suite.basicQueryTime, math.NaN()}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, math.NaN()}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, math.NaN()}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 22}, + {suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, 26}}} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: metricName1, + Labels: labels1, + Data: ingestData1}, + tsdbtest.Metric{ + Name: metricName2, + Labels: labels1, + Data: ingestData2}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + suite.NoError(err, "failed to create querier v2") + + params := &pquerier.SelectParams{Filter: "1==1", + From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents)*eventsInterval} + set, err := querierV2.SelectDataFrame(params) + suite.NoError(err, "failed to exeute query") + + var seriesCount int + for set.NextFrame() { + seriesCount++ + frame, err := set.GetFrame() + suite.NoError(err) + + indexCol := frame.Indices()[0] + assert.Equal(suite.T(), 6, indexCol.Len()) + for i := 0; i < indexCol.Len(); i++ { + t, err := indexCol.TimeAt(i) + assert.NoError(suite.T(), err) + suite.Require().Equal(expectedData[metricName1][i].Time, t.UnixNano()/int64(time.Millisecond)) + + for _, colName := range frame.Names() { + col, err := frame.Column(colName) + suite.NoError(err) + currentExpectedData := expectedData[col.Name()] + suite.Require().Equal(len(currentExpectedData), col.Len()) + currentExpected := currentExpectedData[i].Value + + switch val := currentExpected.(type) { + case float64: + f, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + if !(math.IsNaN(val) && math.IsNaN(f)) { + assert.Equal(suite.T(), currentExpected, f) + } + case int: + iv, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), float64(val), iv) + case string: + s, err := col.StringAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), val, s) + default: + assert.Error(suite.T(), errors.New("unsupported data type")) + } + } + } + } + + suite.Require().Equal(1, seriesCount, "series count didn't match expected") +} + +func (suite *testSelectDataframeSuite) TestColumnOrder() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.NoError(err, "failed to create v3io adapter") + + metricName1 := "cpu" + metricName2 := "diskio" + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 5 + eventsInterval := int64(tsdbtest.MinuteInMillis) + ingestData1 := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 15}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 18}} + ingestData2 := []tsdbtest.DataPoint{{suite.basicQueryTime + tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 22}, + {suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, 26}} + + expectedData := map[string][]tsdbtest.DataPoint{ + metricName1: {{suite.basicQueryTime, 10}, 
+ {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, math.NaN()}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 15}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 18}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, math.NaN()}, + {suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, math.NaN()}}, + metricName2: {{suite.basicQueryTime, math.NaN()}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, math.NaN()}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, math.NaN()}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 22}, + {suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, 26}}} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: metricName1, + Labels: labels1, + Data: ingestData1}, + tsdbtest.Metric{ + Name: metricName2, + Labels: labels1, + Data: ingestData2}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + suite.NoError(err, "failed to create querier v2") + + columnOrder := "diskio,cpu" + params := &pquerier.SelectParams{Name: columnOrder, + From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents)*eventsInterval} + set, err := querierV2.SelectDataFrame(params) + suite.NoError(err, "failed to exeute query") + + var seriesCount int + for set.NextFrame() { + seriesCount++ + frame, err := set.GetFrame() + suite.NoError(err) + + indexCol := frame.Indices()[0] + assert.Equal(suite.T(), 6, indexCol.Len()) + suite.Require().Equal(columnOrder, strings.Join(frame.Names(), ",")) + for i := 0; i < indexCol.Len(); i++ { + t, err := indexCol.TimeAt(i) + assert.NoError(suite.T(), err) + suite.Require().Equal(expectedData[metricName1][i].Time, t.UnixNano()/int64(time.Millisecond)) + + for _, colName := range frame.Names() { + col, err := frame.Column(colName) + suite.NoError(err) + currentExpectedData := expectedData[col.Name()] + suite.Require().Equal(len(currentExpectedData), col.Len()) + currentExpected := currentExpectedData[i].Value + switch val := currentExpected.(type) { + case float64: + fv, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + if !(math.IsNaN(val) && math.IsNaN(fv)) { + assert.Equal(suite.T(), currentExpected, fv) + } + case int: + iv, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), float64(val), iv) + case string: + sv, err := col.StringAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), val, sv) + default: + assert.Error(suite.T(), errors.New("unsupported data type")) + } + } + } + } + + suite.Require().Equal(1, seriesCount, "series count didn't match expected") +} + +func (suite *testSelectDataframeSuite) TestQueryNonExistingMetric() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels := utils.LabelsFromStringList("os", "linux") + cpuData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels, + Data: cpuData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu, tal", + From: suite.basicQueryTime, To: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis} + iter, err := querierV2.SelectDataFrame(params) + suite.Require().NoError(err) + + expectedData := map[string][]tsdbtest.DataPoint{ + "cpu": {{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}}, + "tal": {{suite.basicQueryTime, math.NaN()}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), math.NaN()}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, math.NaN()}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, math.NaN()}}} + + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + suite.NoError(err) + + indexCol := frame.Indices()[0] + assert.Equal(suite.T(), 4, indexCol.Len()) + for i := 0; i < indexCol.Len(); i++ { + t, err := indexCol.TimeAt(i) + assert.NoError(suite.T(), err) + suite.Require().Equal(expectedData["cpu"][i].Time, t.UnixNano()/int64(time.Millisecond)) + + for _, colName := range frame.Names() { + col, err := frame.Column(colName) + suite.NoError(err) + currentExpectedData := expectedData[col.Name()] + suite.Require().Equal(len(currentExpectedData), col.Len()) + currentExpected := currentExpectedData[i].Value + + switch val := currentExpected.(type) { + case float64: + f, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + if !(math.IsNaN(val) && math.IsNaN(f)) { + assert.Equal(suite.T(), currentExpected, f) + } + case int: + iv, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), float64(val), iv) + case string: + s, err := col.StringAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), val, s) + default: + assert.Error(suite.T(), errors.New("unsupported data type")) + } + } + } + } +} + +func (suite *testSelectDataframeSuite) TestSparseStringAndNumericColumnsDataframe() { + requireCtx := suite.Require() + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + requireCtx.NoError(err, "failed to create v3io adapter") + + metricCpu := "cpu" + metricLog := "log" + labels := utils.LabelsFromStringList("os", "linux") + labelsWithNameLog := append(labels, utils.LabelsFromStringList("__name__", metricLog)...) 
+ + expectedTimeColumn := []int64{ + suite.basicQueryTime, + suite.basicQueryTime + tsdbtest.MinuteInMillis, + suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 4*tsdbtest.MinuteInMillis} + + timeColumnLog := []int64{ + suite.basicQueryTime, + suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 4*tsdbtest.MinuteInMillis} + + dataLog := []interface{}{"a", "c", "d", "e"} + expectedColumns := map[string][]interface{}{ + metricCpu: {10.0, 20.0, 30.0, math.NaN(), 50.0}, + metricLog: {"a", "", "c", "d", "e"}} + appender, err := adapter.Appender() + requireCtx.NoError(err, "failed to create v3io appender") + + refLog, err := appender.Add(labelsWithNameLog, timeColumnLog[0], dataLog[0]) + suite.NoError(err, "failed to add data to the TSDB appender") + for i := 1; i < len(timeColumnLog); i++ { + appender.AddFast(labels, refLog, timeColumnLog[i], dataLog[i]) + } + + _, err = appender.WaitForCompletion(0) + requireCtx.NoError(err, "failed to wait for TSDB append completion") + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: metricCpu, + Labels: labels, + Data: []tsdbtest.DataPoint{ + {suite.basicQueryTime, 10.0}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20.0}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30.0}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 50.0}}}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + requireCtx.NoError(err, "failed to create querier") + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: metricCpu}, {Metric: metricLog}}, + From: suite.basicQueryTime, To: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis} + iter, err := querierV2.SelectDataFrame(params) + requireCtx.NoError(err, "failed to execute query") + + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + requireCtx.NoError(err) + indexCol := frame.Indices()[0] + + nullValuesMap := frame.NullValuesMap() + requireCtx.NotNil(nullValuesMap, "null value map should not be empty") + + for i := 0; i < indexCol.Len(); i++ { + t, _ := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + requireCtx.Equal(expectedTimeColumn[i], timeMillis, "time column does not match at index %v", i) + for _, columnName := range frame.Names() { + var v interface{} + column, err := frame.Column(columnName) + requireCtx.NoError(err) + if column.DType() == frames.FloatType { + v, _ = column.FloatAt(i) + if math.IsNaN(v.(float64)) { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + bothNaN := math.IsNaN(expectedColumns[column.Name()][i].(float64)) && math.IsNaN(v.(float64)) + if bothNaN { + continue + } + } else if column.DType() == frames.StringType { + v, _ = column.StringAt(i) + if v == "" { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + } else { + suite.Fail(fmt.Sprintf("column type is not as expected: %v", column.DType())) + } + requireCtx.Equal(expectedColumns[column.Name()][i], v, "column %v does not match at index %v", column.Name(), i) + } + } + } +} + +func (suite *testSelectDataframeSuite) TestSparseNumericColumnsWithEmptyColumnsDataframe() { + requireCtx := suite.Require() + labelSetLinux := utils.LabelsFromStringList("os", "linux") + labelSetWindows :=
utils.LabelsFromStringList("os", "windows") + expectedTimeColumn := []int64{ + suite.basicQueryTime, + suite.basicQueryTime + tsdbtest.MinuteInMillis, + suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 4*tsdbtest.MinuteInMillis} + expectedColumns := map[string][]interface{}{ + "cpu_0-linux": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + "cpu_0-windows": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + "cpu_1-linux": {10.0, 20.0, 30.0, math.NaN(), 50.0}, + "cpu_1-windows": {math.NaN(), 22.0, 33.0, math.NaN(), 55.0}, + "cpu_2-linux": {math.NaN(), math.NaN(), math.NaN(), 40.4, 50.5}, + "cpu_2-windows": {10.0, 20.0, math.NaN(), 40.0, 50.0}, + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{ + tsdbtest.Metric{ + Name: "cpu_0", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + {expectedTimeColumn[0] - 68*tsdbtest.HoursInMillis, 10.0}, + {expectedTimeColumn[1] - 69*tsdbtest.HoursInMillis, 20.0}, + {expectedTimeColumn[2] - 70*tsdbtest.HoursInMillis, 30.0}, + {expectedTimeColumn[3] - 71*tsdbtest.HoursInMillis, 40.0}, + {expectedTimeColumn[4] - 72*tsdbtest.HoursInMillis, 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_1", + Labels: labelSetLinux, + Data: []tsdbtest.DataPoint{ + {expectedTimeColumn[0], 10.0}, + {expectedTimeColumn[1], 20.0}, + {expectedTimeColumn[2], 30.0}, + {expectedTimeColumn[4], 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_2", + Labels: labelSetLinux, + Data: []tsdbtest.DataPoint{ + // NA + // NA + {expectedTimeColumn[3], 40.4}, + {expectedTimeColumn[4], 50.5}}}, + tsdbtest.Metric{ + Name: "cpu_2", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + {expectedTimeColumn[0], 10.0}, + {expectedTimeColumn[1], 20.0}, + // NA + {expectedTimeColumn[3], 40.0}, + {expectedTimeColumn[4], 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_1", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + // NA + {expectedTimeColumn[1], 22.0}, + {expectedTimeColumn[2], 33.0}, + // NA + {expectedTimeColumn[4], 55.0}}}, + }}) + + adapter := tsdbtest.InsertData(suite.T(), testParams) + querierV2, err := adapter.QuerierV2() + requireCtx.NoError(err, "failed to create querier") + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "cpu_0"}, {Metric: "cpu_1"}, {Metric: "cpu_2"}}, + From: suite.basicQueryTime, To: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis} + iter, err := querierV2.SelectDataFrame(params) + requireCtx.NoError(err, "failed to execute query") + + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + requireCtx.NoError(err) + indexCol := frame.Indices()[0] + osLabel := frame.Labels()["os"] + + nullValuesMap := frame.NullValuesMap() + requireCtx.NotNil(nullValuesMap, "null value map should not be empty") + + for i := 0; i < indexCol.Len(); i++ { + t, _ := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + requireCtx.Equal(expectedTimeColumn[i], timeMillis, "time column does not match at index %v", i) + for _, columnName := range frame.Names() { + var v interface{} + key := fmt.Sprintf("%v-%v", columnName, osLabel) + column, err := frame.Column(columnName) + requireCtx.NoError(err) + if column.DType() == frames.FloatType { + v, _ = column.FloatAt(i) + if v == math.NaN() { + 
requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + bothNaN := math.IsNaN(expectedColumns[key][i].(float64)) && math.IsNaN(v.(float64)) + if bothNaN { + continue + } + } else if column.DType() == frames.StringType { + v, _ = column.StringAt(i) + if v == "" { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + } else { + suite.Fail(fmt.Sprintf("column type is not as expected: %v", column.DType())) + } + + expectedValue := expectedColumns[key][i] + if !math.IsNaN(expectedValue.(float64)) || !math.IsNaN(v.(float64)) { + requireCtx.Equal(expectedValue, v, "column %v does not match at index %v", columnName, i) + } + } + } + } +} + +func (suite *testSelectDataframeSuite) TestSparseNumericColumnsWithPartialLabelsDataframe() { + requireCtx := suite.Require() + labelSetLinux := utils.LabelsFromStringList("os", "linux") + labelSetWindows := utils.LabelsFromStringList("os", "windows") + expectedTimeColumn := []int64{ + suite.basicQueryTime, + suite.basicQueryTime + tsdbtest.MinuteInMillis, + suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 4*tsdbtest.MinuteInMillis} + expectedColumns := map[string][]interface{}{ + "cpu_0-linux": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + "cpu_0-windows": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + "cpu_1-linux": {10.0, 20.0, 30.0, 40.0, 50.0}, + "cpu_1-windows": {math.NaN(), 22.0, 33.0, math.NaN(), 55.0}, + "cpu_2-linux": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + "cpu_2-windows": {10.0, 20.0, math.NaN(), 40.0, 50.0}, + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{ + tsdbtest.Metric{ + Name: "cpu_0", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + {expectedTimeColumn[0] - 68*tsdbtest.HoursInMillis, 10.0}, + {expectedTimeColumn[1] - 69*tsdbtest.HoursInMillis, 20.0}, + {expectedTimeColumn[2] - 70*tsdbtest.HoursInMillis, 30.0}, + {expectedTimeColumn[3] - 71*tsdbtest.HoursInMillis, 40.0}, + {expectedTimeColumn[4] - 72*tsdbtest.HoursInMillis, 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_1", + Labels: labelSetLinux, + Data: []tsdbtest.DataPoint{ + {expectedTimeColumn[0], 10.0}, + {expectedTimeColumn[1], 20.0}, + {expectedTimeColumn[2], 30.0}, + {expectedTimeColumn[3], 40.0}, + {expectedTimeColumn[4], 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_2", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + {expectedTimeColumn[0], 10.0}, + {expectedTimeColumn[1], 20.0}, + // NA + {expectedTimeColumn[3], 40.0}, + {expectedTimeColumn[4], 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_1", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + // NA + {expectedTimeColumn[1], 22.0}, + {expectedTimeColumn[2], 33.0}, + // NA + {expectedTimeColumn[4], 55.0}}}, + }}) + + adapter := tsdbtest.InsertData(suite.T(), testParams) + querierV2, err := adapter.QuerierV2() + requireCtx.NoError(err, "failed to create querier") + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "cpu_0"}, {Metric: "cpu_1"}, {Metric: "cpu_2"}}, + From: suite.basicQueryTime, To: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis} + iter, err := querierV2.SelectDataFrame(params) + requireCtx.NoError(err, "failed to execute query") + + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + 
requireCtx.NoError(err) + indexCol := frame.Indices()[0] + osLabel := frame.Labels()["os"] + + nullValuesMap := frame.NullValuesMap() + requireCtx.NotNil(nullValuesMap, "null value map should not be empty") + + for i := 0; i < indexCol.Len(); i++ { + t, _ := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + requireCtx.Equal(expectedTimeColumn[i], timeMillis, "time column does not match at index %v", i) + for _, columnName := range frame.Names() { + key := fmt.Sprintf("%v-%v", columnName, osLabel) + var v interface{} + column, err := frame.Column(columnName) + requireCtx.NoError(err) + if column.DType() == frames.FloatType { + v, _ = column.FloatAt(i) + if math.IsNaN(v.(float64)) { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + bothNaN := math.IsNaN(expectedColumns[key][i].(float64)) && math.IsNaN(v.(float64)) + if bothNaN { + continue + } + } else if column.DType() == frames.StringType { + v, _ = column.StringAt(i) + if v == "" { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + } else { + suite.Fail(fmt.Sprintf("column type is not as expected: %v", column.DType())) + } + + expectedValue := expectedColumns[key][i] + if !math.IsNaN(expectedValue.(float64)) || !math.IsNaN(v.(float64)) { + requireCtx.Equal(expectedValue, v, "column %v does not match at index %v", columnName, i) + } + } + } + } +} + +func (suite *testSelectDataframeSuite) TestSparseNumericColumnsWithNotExistingMetricDataframe() { + requireCtx := suite.Require() + labelSetLinux := utils.LabelsFromStringList("os", "linux") + labelSetWindows := utils.LabelsFromStringList("os", "windows") + expectedTimeColumn := []int64{ + suite.basicQueryTime, + suite.basicQueryTime + tsdbtest.MinuteInMillis, + suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 4*tsdbtest.MinuteInMillis} + expectedColumns := map[string][]interface{}{ + "cpu_0-linux": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + "cpu_1-linux": {10.0, 20.0, 30.0, 40.0, 50.0}, + "cpu_2-linux": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + "fake-linux": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + "cpu_0-windows": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + "cpu_1-windows": {math.NaN(), 22.0, 33.0, math.NaN(), 55.0}, + "cpu_2-windows": {10.0, 20.0, math.NaN(), 40.0, 50.0}, + "fake-windows": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{ + tsdbtest.Metric{ + Name: "cpu_0", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + {expectedTimeColumn[0] - 68*tsdbtest.HoursInMillis, 10.0}, + {expectedTimeColumn[1] - 69*tsdbtest.HoursInMillis, 20.0}, + {expectedTimeColumn[2] - 70*tsdbtest.HoursInMillis, 30.0}, + {expectedTimeColumn[3] - 71*tsdbtest.HoursInMillis, 40.0}, + {expectedTimeColumn[4] - 72*tsdbtest.HoursInMillis, 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_1", + Labels: labelSetLinux, + Data: []tsdbtest.DataPoint{ + {expectedTimeColumn[0], 10.0}, + {expectedTimeColumn[1], 20.0}, + {expectedTimeColumn[2], 30.0}, + {expectedTimeColumn[3], 40.0}, + {expectedTimeColumn[4], 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_2", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + {expectedTimeColumn[0], 10.0}, + {expectedTimeColumn[1], 20.0}, + // NA + {expectedTimeColumn[3],
40.0}, + {expectedTimeColumn[4], 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_1", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + // NA + {expectedTimeColumn[1], 22.0}, + {expectedTimeColumn[2], 33.0}, + // NA + {expectedTimeColumn[4], 55.0}}}, + }}) + + adapter := tsdbtest.InsertData(suite.T(), testParams) + querierV2, err := adapter.QuerierV2() + requireCtx.NoError(err, "failed to create querier") + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "cpu_0"}, {Metric: "cpu_1"}, {Metric: "cpu_2"}, {Metric: "fake"}}, + From: suite.basicQueryTime, To: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis} + iter, err := querierV2.SelectDataFrame(params) + requireCtx.NoError(err, "failed to execute query") + + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + requireCtx.NoError(err) + indexCol := frame.Indices()[0] + osLabel := frame.Labels()["os"] + nullValuesMap := frame.NullValuesMap() + requireCtx.NotNil(nullValuesMap, "null value map should not be empty") + + for i := 0; i < indexCol.Len(); i++ { + t, _ := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + requireCtx.Equal(expectedTimeColumn[i], timeMillis, "time column does not match at index %d", i) + for _, columnName := range frame.Names() { + key := fmt.Sprintf("%v-%v", columnName, osLabel) + var v interface{} + column, err := frame.Column(columnName) + requireCtx.NoError(err) + if column.DType() == frames.FloatType { + v, _ = column.FloatAt(i) + if v == math.NaN() { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + + bothNaN := math.IsNaN(expectedColumns[key][i].(float64)) && math.IsNaN(v.(float64)) + if bothNaN { + continue + } + } else if column.DType() == frames.StringType { + v, _ = column.StringAt(i) + if v == "" { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + } else { + suite.Fail(fmt.Sprintf("column type is not as expected: %v", column.DType())) + } + + expectedValue := expectedColumns[key][i] + if !math.IsNaN(expectedValue.(float64)) || !math.IsNaN(v.(float64)) { + requireCtx.Equal(expectedValue, v, "column %v does not match at index %d", columnName, i) + } + } + } + } +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/downsample_query_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/downsample_query_integration_test.go new file mode 100644 index 00000000..e2064425 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/downsample_query_integration_test.go @@ -0,0 +1,183 @@ +// +build integration + +package pqueriertest + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testDownsampleSuite struct { + basicQueryTestSuite +} + +func TestDownsampleSuite(t *testing.T) { + suite.Run(t, new(testDownsampleSuite)) +} + +func (suite *testDownsampleSuite) TestDownSampleNotReturningAggrAttr() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 6*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 9*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", Step: 2 * int64(tsdbtest.MinuteInMillis), From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + labels := set.At().Labels() + suite.Require().Empty(labels.Get(aggregate.AggregateLabel)) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testDownsampleSuite) TestRawDataSinglePartitionWithDownSample() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 6*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 9*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expectedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 6*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 8*tsdbtest.MinuteInMillis, 40}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Step: 2 * int64(tsdbtest.MinuteInMillis), + From: suite.basicQueryTime, + To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetric(data, expectedData) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testDownsampleSuite) TestRawDataDownSampleMultiPartitions() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + + ingestData := []tsdbtest.DataPoint{{suite.toMillis("2018-11-18T23:40:00Z"), 10}, + {suite.toMillis("2018-11-18T23:59:00Z"), 20}, + {suite.toMillis("2018-11-19T00:20:00Z"), 30}, + {suite.toMillis("2018-11-19T02:40:00Z"), 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expectedData := []tsdbtest.DataPoint{{suite.toMillis("2018-11-18T22:00:00Z"), 10}, + {suite.toMillis("2018-11-19T00:00:00Z"), 30}, + {suite.toMillis("2018-11-19T02:00:00Z"), 40}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "cpu"}}, + Step: 2 * int64(tsdbtest.HoursInMillis), + From: suite.toMillis("2018-11-18T22:00:00Z"), + To: suite.toMillis("2018-11-19T4:00:00Z")} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetric(data, expectedData) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/get_labelsets_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/get_labelsets_integration_test.go new file mode 100644 index 00000000..0e95cba0 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/get_labelsets_integration_test.go @@ -0,0 +1,248 @@ +// +build integration + +package pqueriertest + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type getLabelSetsSuite struct { + suite.Suite + v3ioConfig *config.V3ioConfig + suiteTimestamp int64 + basicQueryTime int64 +} + +func TestGetLabelSetsSuite(t *testing.T) { + suite.Run(t, new(getLabelSetsSuite)) +} + +func (suite *getLabelSetsSuite) SetupSuite() { + v3ioConfig, err := tsdbtest.LoadV3ioConfig() + if err != nil { + suite.T().Fatalf("unable to load configuration. 
Error: %v", err) + } + + suite.v3ioConfig = v3ioConfig + suite.suiteTimestamp = time.Now().Unix() + suite.basicQueryTime, err = tsdbtest.DateStringToMillis("2018-07-21T10:00:00Z") + suite.NoError(err) +} + +func (suite *getLabelSetsSuite) SetupTest() { + suite.v3ioConfig.TablePath = fmt.Sprintf("%s-%v", suite.T().Name(), suite.suiteTimestamp) + tsdbtest.CreateTestTSDB(suite.T(), suite.v3ioConfig) +} + +func (suite *getLabelSetsSuite) TearDownTest() { + suite.v3ioConfig.TablePath = fmt.Sprintf("%s-%v", suite.T().Name(), suite.suiteTimestamp) + if !suite.T().Failed() { + tsdbtest.DeleteTSDB(suite.T(), suite.v3ioConfig) + } +} + +func (suite *getLabelSetsSuite) TestGetLabels() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels := []utils.Labels{utils.LabelsFromStringList("os", "linux", "region", "europe"), + utils.LabelsFromStringList("os", "linux", "region", "asia"), + utils.LabelsFromStringList("os", "mac", "region", "europe")} + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels[0], + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels[1], + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels[2], + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + expectedLabels := []utils.Labels{utils.LabelsFromStringList("os", "linux", "region", "europe", config.PrometheusMetricNameAttribute, "cpu"), + utils.LabelsFromStringList("os", "linux", "region", "asia", config.PrometheusMetricNameAttribute, "cpu"), + utils.LabelsFromStringList("os", "mac", "region", "europe", config.PrometheusMetricNameAttribute, "cpu")} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + labelsList, err := querierV2.GetLabelSets("cpu", "") + if err != nil { + suite.T().Fatalf("failed to get label sets, err:%v\n", err) + } + + suite.ElementsMatch(expectedLabels, labelsList, "actual label sets does not match expected") +} + +func (suite *getLabelSetsSuite) TestGetLabelsAllMetrics() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels := []utils.Labels{utils.LabelsFromStringList("os", "linux", "region", "europe"), + utils.LabelsFromStringList("os", "linux", "region", "asia"), + utils.LabelsFromStringList("os", "mac", "region", "europe")} + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels[0], + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels[1], + Data: ingestedData}, + tsdbtest.Metric{ + Name: "diskio", + Labels: labels[2], + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + expectedLabels := []utils.Labels{utils.LabelsFromStringList("os", "linux", "region", "europe", config.PrometheusMetricNameAttribute, "cpu"), + utils.LabelsFromStringList("os", "linux", "region", "asia", config.PrometheusMetricNameAttribute, "cpu"), + utils.LabelsFromStringList("os", "mac", "region", "europe", config.PrometheusMetricNameAttribute, "diskio")} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, 
"failed to create querier v2") + + labelsList, err := querierV2.GetLabelSets("", "") + if err != nil { + suite.T().Fatalf("failed to get label sets, err:%v\n", err) + } + + suite.ElementsMatch(expectedLabels, labelsList, "actual label sets does not match expected") +} + +func (suite *getLabelSetsSuite) TestGetLabelsAllSpecificMetric() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels := []utils.Labels{utils.LabelsFromStringList("os", "linux", "region", "europe"), + utils.LabelsFromStringList("os", "linux", "region", "asia"), + utils.LabelsFromStringList("os", "mac", "region", "europe")} + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels[0], + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels[1], + Data: ingestedData}, + tsdbtest.Metric{ + Name: "diskio", + Labels: labels[2], + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + expectedLabels := []utils.Labels{utils.LabelsFromStringList("os", "linux", "region", "europe", config.PrometheusMetricNameAttribute, "cpu"), + utils.LabelsFromStringList("os", "linux", "region", "asia", config.PrometheusMetricNameAttribute, "cpu")} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + labelsList, err := querierV2.GetLabelSets("cpu", "") + if err != nil { + suite.T().Fatalf("failed to get label sets, err:%v\n", err) + } + + suite.ElementsMatch(expectedLabels, labelsList, "actual label sets does not match expected") +} + +func (suite *getLabelSetsSuite) TestGetLabelsWithFilter() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels := []utils.Labels{utils.LabelsFromStringList("os", "linux", "region", "europe"), + utils.LabelsFromStringList("os", "linux", "region", "asia"), + utils.LabelsFromStringList("os", "mac", "region", "europe")} + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels[0], + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels[1], + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels[2], + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + expectedLabels := []utils.Labels{utils.LabelsFromStringList("os", "linux", "region", "europe", config.PrometheusMetricNameAttribute, "cpu"), + utils.LabelsFromStringList("os", "linux", "region", "asia", config.PrometheusMetricNameAttribute, "cpu")} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + labelsList, err := querierV2.GetLabelSets("cpu", "os=='linux'") + if err != nil { + suite.T().Fatalf("failed to get label sets, err:%v\n", err) + } + + suite.ElementsMatch(expectedLabels, labelsList, "actual label sets does not match expected") +} + +func (suite *getLabelSetsSuite) TestGetLabelsAllMetricsFrom2Partitions() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels := []utils.Labels{utils.LabelsFromStringList("os", "linux", 
"region", "europe"), + utils.LabelsFromStringList("os", "linux", "region", "asia"), + utils.LabelsFromStringList("os", "mac", "region", "europe")} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels[0], + Data: []tsdbtest.DataPoint{{suite.basicQueryTime - 4*tsdbtest.DaysInMillis, 10}}}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels[1], + Data: []tsdbtest.DataPoint{{suite.basicQueryTime - 4*tsdbtest.DaysInMillis, 10}, + {suite.basicQueryTime - 2*tsdbtest.DaysInMillis, 10}}}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels[2], + Data: []tsdbtest.DataPoint{{suite.basicQueryTime, 10}}}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + expectedLabels := []utils.Labels{ + utils.LabelsFromStringList("os", "linux", "region", "asia", config.PrometheusMetricNameAttribute, "cpu"), + utils.LabelsFromStringList("os", "mac", "region", "europe", config.PrometheusMetricNameAttribute, "cpu")} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + labelsList, err := querierV2.GetLabelSets("", "") + if err != nil { + suite.T().Fatalf("failed to get label sets, err:%v\n", err) + } + + suite.Require().ElementsMatch(expectedLabels, labelsList, "actual label sets does not match expected") +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/integration_test_basic_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/integration_test_basic_test.go new file mode 100644 index 00000000..8346aea1 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/integration_test_basic_test.go @@ -0,0 +1,86 @@ +// +build integration + +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package pqueriertest + +import ( + "fmt" + "time" + + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" +) + +type basicQueryTestSuite struct { + suite.Suite + v3ioConfig *config.V3ioConfig + suiteTimestamp int64 + basicQueryTime int64 +} + +func (suite *basicQueryTestSuite) toMillis(date string) int64 { + time, err := tsdbtest.DateStringToMillis(date) + suite.NoError(err) + return time +} + +func (suite *basicQueryTestSuite) SetupSuite() { + v3ioConfig, err := tsdbtest.LoadV3ioConfig() + if err != nil { + suite.T().Fatalf("unable to load configuration. 
Error: %v", err) + } + + suite.v3ioConfig = v3ioConfig + suite.suiteTimestamp = time.Now().Unix() + suite.basicQueryTime = suite.toMillis("2018-07-21T21:40:00Z") +} + +func (suite *basicQueryTestSuite) SetupTest() { + suite.v3ioConfig.TablePath = fmt.Sprintf("%s-%v", suite.T().Name(), suite.suiteTimestamp) + tsdbtest.CreateTestTSDB(suite.T(), suite.v3ioConfig) +} + +func (suite *basicQueryTestSuite) TearDownTest() { + suite.v3ioConfig.TablePath = fmt.Sprintf("%s-%v", suite.T().Name(), suite.suiteTimestamp) + if !suite.T().Failed() { + tsdbtest.DeleteTSDB(suite.T(), suite.v3ioConfig) + } +} + +func (suite *basicQueryTestSuite) compareSingleMetric(data []tsdbtest.DataPoint, expected []tsdbtest.DataPoint) { + for i, dataPoint := range data { + suite.Require().True(dataPoint.Equals(expected[i]), "queried data does not match expected") + } +} + +func (suite *basicQueryTestSuite) compareSingleMetricWithAggregator(data []tsdbtest.DataPoint, expected map[string][]tsdbtest.DataPoint, agg string) { + for i, dataPoint := range data { + suite.Require().True(dataPoint.Equals(expected[agg][i]), "queried data does not match expected") + } +} + +func (suite *basicQueryTestSuite) compareMultipleMetrics(data []tsdbtest.DataPoint, expected map[string]map[string][]tsdbtest.DataPoint, metricName string, aggr string) { + for i, dataPoint := range data { + suite.Require().True(dataPoint.Equals(expected[metricName][aggr][i]), "queried data does not match expected") + } +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/query_sql_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/query_sql_integration_test.go new file mode 100644 index 00000000..cb7e646f --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/query_sql_integration_test.go @@ -0,0 +1,381 @@ +// +build integration + +package pqueriertest + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testSQLSyntaxQuerySuite struct { + basicQueryTestSuite +} + +func TestSQLSyntaxQuerySuite(t *testing.T) { + suite.Run(t, new(testSQLSyntaxQuerySuite)) +} + +func (suite *testSQLSyntaxQuerySuite) TestGroupByOneLabelSinglePartition() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux", "region", "europe") + labels2 := utils.LabelsFromStringList("os", "mac", "region", "europe") + labels3 := utils.LabelsFromStringList("os", "linux", "region", "americas") + labels4 := utils.LabelsFromStringList("os", "linux", "region", "asia") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels3, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels4, + Data: ingestedData}, + }}) + 
tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string]map[string][]tsdbtest.DataPoint{ + "linux": { + "sum": {{Time: suite.basicQueryTime, Value: 30}}, + "count": {{Time: suite.basicQueryTime, Value: 3}}}, + "mac": { + "sum": {{Time: suite.basicQueryTime, Value: 10}}, + "count": {{Time: suite.basicQueryTime, Value: 1}}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum,count", + Step: 2 * 60 * 1000, + From: suite.basicQueryTime, + To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval), + GroupBy: "os"} + set, err := querierV2.Select(params) + suite.Require().NoError(err, "failed to exeute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + groupByValue := set.At().Labels().Get("os") + suite.Require().NoError(err) + suite.compareMultipleMetrics(data, expected, groupByValue, agg) + } + + suite.Require().Equal(4, seriesCount, "series count didn't match expected") +} + +func (suite *testSQLSyntaxQuerySuite) TestGroupByMultipleLabelsSinglePartition() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux", "region", "europe", "version", "1") + labels2 := utils.LabelsFromStringList("os", "linux", "region", "europe", "version", "2") + labels3 := utils.LabelsFromStringList("os", "linux", "region", "americas", "version", "3") + labels4 := utils.LabelsFromStringList("os", "mac", "region", "asia", "version", "1") + labels5 := utils.LabelsFromStringList("os", "mac", "region", "asia", "version", "2") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels3, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels4, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels5, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + groupBy := []string{"os", "region"} + expected := map[string]map[string][]tsdbtest.DataPoint{ + "linux-europe": { + "sum": {{Time: suite.basicQueryTime, Value: 20}}, + "count": {{Time: suite.basicQueryTime, Value: 2}}}, + "linux-americas": { + "sum": {{Time: suite.basicQueryTime, Value: 10}}, + "count": {{Time: suite.basicQueryTime, Value: 1}}}, + "mac-asia": { + "sum": {{Time: suite.basicQueryTime, Value: 20}}, + "count": {{Time: suite.basicQueryTime, Value: 2}}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum,count", + Step: 2 * 60 * 1000, + From: suite.basicQueryTime, + To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval), + GroupBy: strings.Join(groupBy, ",")} + set, err := querierV2.Select(params) + suite.Require().NoError(err, "failed to exeute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := 
tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + var groupByValue []string + for _, label := range groupBy { + groupByValue = append(groupByValue, set.At().Labels().Get(label)) + } + labelsStr := strings.Join(groupByValue, "-") + + suite.Require().NoError(err) + suite.compareMultipleMetrics(data, expected, labelsStr, agg) + } + + suite.Require().Equal(6, seriesCount, "series count didn't match expected") +} + +func (suite *testSQLSyntaxQuerySuite) TestGroupByNotExistingLabel() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux", "region", "europe") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum,count", + Step: 2 * 60 * 1000, + From: suite.basicQueryTime, + To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval), + GroupBy: "something that does not exist"} + _, err = querierV2.Select(params) + if err == nil { + suite.T().Fatalf("expected fail but continued normally") + } +} + +func (suite *testSQLSyntaxQuerySuite) TestAggregateSeriesWithAlias() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + expectedResult := 40.0 + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + aliasName := "iguaz" + params, _, _ := pquerier.ParseQuery(fmt.Sprintf("select max(cpu) as %v", aliasName)) + + params.From = suite.basicQueryTime + params.To = suite.basicQueryTime + int64(numberOfEvents*eventsInterval) + + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + assert.Equal(suite.T(), 1, len(data), "queried data does not match expected") + assert.Equal(suite.T(), expectedResult, data[0].Value, "queried data does not match expected") + + seriesName := set.At().Labels().Get(config.PrometheusMetricNameAttribute) + suite.Equal(aliasName, seriesName) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testSQLSyntaxQuerySuite) TestAggregateSeriesWildcardOnPartOfTheColumns() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestData}, + tsdbtest.Metric{ + Name: "diskio", + Labels: labels1, + Data: ingestData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + expectedResult := map[string]float64{"max(cpu)": 40, "max(diskio)": 40, "min(cpu)": 10} + + querierV2, err := adapter.QuerierV2() + suite.NoError(err, "failed to create querier v2") + + params, _, _ := pquerier.ParseQuery("select max(*), min(cpu)") + + params.From = suite.basicQueryTime + params.To = suite.basicQueryTime + int64(numberOfEvents*eventsInterval) + + set, err := querierV2.Select(params) + suite.NoError(err, "failed to exeute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + labels := set.At().Labels() + expectedKey := fmt.Sprintf("%v(%v)", labels.Get(aggregate.AggregateLabel), labels.Get(config.PrometheusMetricNameAttribute)) + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + suite.Require().Equal(1, len(data), "queried data does not match expected") + suite.Require().Equal(expectedResult[expectedKey], data[0].Value, "queried data does not 
match expected") + } + + suite.Require().Equal(len(expectedResult), seriesCount, "series count didn't match expected") +} + +func (suite *testSQLSyntaxQuerySuite) TestAggregateSeriesWildcardOnPartOfTheColumnsWithVirtualColumn() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestData}, + tsdbtest.Metric{ + Name: "diskio", + Labels: labels1, + Data: ingestData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + expectedResult := map[string]float64{"avg(cpu)": 25, "avg(diskio)": 25, "min(cpu)": 10} + + querierV2, err := adapter.QuerierV2() + suite.NoError(err, "failed to create querier v2") + + params, _, _ := pquerier.ParseQuery("select avg(*), min(cpu)") + + params.From = suite.basicQueryTime + params.To = suite.basicQueryTime + int64(numberOfEvents*eventsInterval) + + set, err := querierV2.Select(params) + suite.NoError(err, "failed to exeute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + labels := set.At().Labels() + expectedKey := fmt.Sprintf("%v(%v)", labels.Get(aggregate.AggregateLabel), labels.Get(config.PrometheusMetricNameAttribute)) + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + suite.Require().Equal(1, len(data), "queried data does not match expected") + suite.Require().Equal(expectedResult[expectedKey], data[0].Value, "queried data does not match expected") + } + + suite.Require().Equal(len(expectedResult), seriesCount, "series count didn't match expected") +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/raw_query_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/raw_query_integration_test.go new file mode 100644 index 00000000..3a61864c --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/raw_query_integration_test.go @@ -0,0 +1,792 @@ +// +build integration + +package pqueriertest + +import ( + "errors" + "fmt" + "math" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testRawQuerySuite struct { + basicQueryTestSuite +} + +func TestRawQuerySuite(t *testing.T) { + suite.Run(t, new(testRawQuerySuite)) +} + +func (suite *testRawQuerySuite) TestRawDataSinglePartition() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + expectedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: expectedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: expectedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetric(data, expectedData) + } + + assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") +} + +func (suite *testRawQuerySuite) TestRawDataMultiplePartitions() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + numberOfEvents := 5 + eventsInterval := int64(tsdbtest.MinuteInMillis) + expectedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*eventsInterval, 30}, + {suite.basicQueryTime + 3*eventsInterval, 40}} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: expectedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: expectedData}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", From: suite.basicQueryTime - 8*tsdbtest.DaysInMillis, To: suite.basicQueryTime + int64(numberOfEvents)*eventsInterval} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetric(data, expectedData) + } + + assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") +} + +func (suite *testRawQuerySuite) TestFilterOnLabel() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + numberOfEvents := 5 + eventsInterval := int64(tsdbtest.MinuteInMillis) + expectedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*eventsInterval, 30}, + {suite.basicQueryTime + 3*eventsInterval, 40}} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: expectedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: expectedData}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", Filter: "os=='linux'", + From: suite.basicQueryTime - 8*tsdbtest.DaysInMillis, To: suite.basicQueryTime + int64(numberOfEvents)*eventsInterval} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetric(data, expectedData) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testRawQuerySuite) TestQueryWithBadTimeParameters() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + expectedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: expectedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: expectedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", From: suite.basicQueryTime + int64(numberOfEvents*eventsInterval), To: suite.basicQueryTime} + _, err = querierV2.Select(params) + if err == nil { + suite.T().Fatalf("expected to get error but no error was returned") + } +} + +func (suite *testRawQuerySuite) TestSelectRawDataByRequestedColumns() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := ingestedData + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "cpu"}}, + From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + for i, dataPoint := range expected { + suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") + } + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testRawQuerySuite) TestRawDataMultipleMetrics() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + metricName1 := "cpu" + metricName2 := "diskio" + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 5 + eventsInterval := int64(tsdbtest.MinuteInMillis) + ingestData1 := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*eventsInterval, 30}, + {suite.basicQueryTime + 4*eventsInterval, 40}} + ingestData2 := []tsdbtest.DataPoint{{suite.basicQueryTime - 5*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + 2*tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 3*eventsInterval, 30}, + {suite.basicQueryTime + 4*eventsInterval, 40}} + + expectedData := map[string][]tsdbtest.DataPoint{metricName1: ingestData1, metricName2: ingestData2} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: metricName1, + Labels: labels1, + Data: ingestData1}, + tsdbtest.Metric{ + Name: metricName2, + Labels: labels2, + Data: ingestData2}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: metricName1}, {Metric: metricName2}}, + From: suite.basicQueryTime - 8*tsdbtest.DaysInMillis, To: suite.basicQueryTime + int64(numberOfEvents)*eventsInterval} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + name := 
set.At().Labels().Get(config.PrometheusMetricNameAttribute) + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + for i, dataPoint := range expectedData[name] { + suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") + } + } + + assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") +} + +func (suite *testRawQuerySuite) TestDataFrameRawDataMultipleMetrics() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + metricName1 := "cpu" + metricName2 := "diskio" + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 5 + eventsInterval := int64(tsdbtest.MinuteInMillis) + expectedTimeColumn := []int64{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, suite.basicQueryTime - 5*tsdbtest.DaysInMillis, + suite.basicQueryTime + tsdbtest.MinuteInMillis, suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, suite.basicQueryTime + 4*tsdbtest.MinuteInMillis} + expectedColumns := map[string][]float64{metricName1: {10, math.NaN(), 20, 30, math.NaN(), 40}, + metricName2: {math.NaN(), 10, math.NaN(), 20, 30, 40}} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: metricName1, + Labels: labels1, + Data: []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 40}}}, + tsdbtest.Metric{ + Name: metricName2, + Labels: labels2, + Data: []tsdbtest.DataPoint{{suite.basicQueryTime - 5*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + 2*tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 40}}}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: metricName1}, {Metric: metricName2}}, + From: suite.basicQueryTime - 8*tsdbtest.DaysInMillis, To: suite.basicQueryTime + int64(numberOfEvents)*eventsInterval} + iter, err := querierV2.SelectDataFrame(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + suite.NoError(err) + indexCol := frame.Indices()[0] // in tsdb we have only one index + + for i := 0; i < indexCol.Len(); i++ { + t, _ := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + assert.Equal(suite.T(), expectedTimeColumn[i], timeMillis, "time column does not match at index %v", i) + for _, columnName := range frame.Names() { + column, err := frame.Column(columnName) + suite.NoError(err) + v, _ := column.FloatAt(i) + + expected := expectedColumns[column.Name()][i] + + // assert can not compare NaN, so we need to check it manually + if !(math.IsNaN(expected) && math.IsNaN(v)) { + assert.Equal(suite.T(), expectedColumns[column.Name()][i], v, "column %v does not match at index %v", column.Name(), i) + } + } + } + } + + 
assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testRawQuerySuite) TestQueryMultipleMetricsWithMultipleLabelSets() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + metricName1 := "cpu" + metricName2 := "diskio" + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + numberOfEvents := 5 + eventsInterval := int64(tsdbtest.MinuteInMillis) + ingestData1 := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + ingestData2 := []tsdbtest.DataPoint{{suite.basicQueryTime, 20}} + ingestData3 := []tsdbtest.DataPoint{{suite.basicQueryTime, 30}, + {suite.basicQueryTime + tsdbtest.MinuteInMillis, 40}} + + expectedData := map[string][]tsdbtest.DataPoint{fmt.Sprintf("%v-%v", metricName1, "linux"): ingestData1, + fmt.Sprintf("%v-%v", metricName2, "linux"): ingestData2, + fmt.Sprintf("%v-%v", metricName2, "mac"): ingestData3} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: metricName1, + Labels: labels1, + Data: ingestData1}, + tsdbtest.Metric{ + Name: metricName2, + Labels: labels1, + Data: ingestData2}, + tsdbtest.Metric{ + Name: metricName2, + Labels: labels2, + Data: ingestData3}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Filter: "1==1", + From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents)*eventsInterval} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + name := set.At().Labels().Get(config.PrometheusMetricNameAttribute) + os := set.At().Labels().Get("os") + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + for i, dataPoint := range expectedData[fmt.Sprintf("%v-%v", name, os)] { + suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") + } + } + + assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") +} + +func (suite *testRawQuerySuite) TestDifferentLabelSetsInDifferentPartitions() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels2 := utils.LabelsFromStringList("os", "mac") + + ingestData2 := []tsdbtest.DataPoint{{suite.basicQueryTime - 9*tsdbtest.DaysInMillis - 1*tsdbtest.HoursInMillis, 40}, + {suite.basicQueryTime, 40}} + + expected := []tsdbtest.DataPoint{{suite.basicQueryTime, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestData2}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{From: suite.basicQueryTime - 9*tsdbtest.DaysInMillis, To: suite.basicQueryTime + tsdbtest.DaysInMillis} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetric(data, expected) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testRawQuerySuite) TestDifferentMetricsInDifferentPartitions() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + + ingestData2 := []tsdbtest.DataPoint{{suite.basicQueryTime - 9*tsdbtest.DaysInMillis - 1*tsdbtest.HoursInMillis, 10}, + {suite.basicQueryTime, 40}} + + expected := []tsdbtest.DataPoint{{suite.basicQueryTime, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{ + tsdbtest.Metric{ + Name: "diskio", + Labels: labels1, + Data: ingestData2}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{From: suite.basicQueryTime - 9*tsdbtest.DaysInMillis, To: suite.basicQueryTime + tsdbtest.DaysInMillis} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + for i, dataPoint := range expected { + suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") + } + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testRawQuerySuite) TestQueryMetricDoesNotHaveData() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels := utils.LabelsFromStringList("os", "linux") + cpuData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + diskioData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels, + Data: cpuData}, + tsdbtest.Metric{ + Name: "diskio", + Labels: labels, + Data: diskioData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu, diskio", + From: suite.basicQueryTime + tsdbtest.MinuteInMillis, + To: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis} + iter, err := querierV2.SelectDataFrame(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + expectedTimeColumn := []int64{suite.basicQueryTime + tsdbtest.MinuteInMillis, + suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 3*tsdbtest.MinuteInMillis} + expectedColumns := map[string][]float64{"cpu": {20, 30, 40}, + "diskio": {math.NaN(), math.NaN(), math.NaN()}} + + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + suite.NoError(err, "failed to get frame") + indexCol := frame.Indices()[0] // in tsdb we have only one index + suite.Require().Equal(len(expectedColumns), len(frame.Names()), + "columns size does not match expected, got: %v", frame.Names()) + + for i := 0; i < indexCol.Len(); i++ { + t, _ := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + suite.Require().Equal(expectedTimeColumn[i], timeMillis, "time column does not match at index %v", i) + for _, columnName := range frame.Names() { + column, err := frame.Column(columnName) + suite.NoError(err) + v, _ := column.FloatAt(i) + + expected := expectedColumns[columnName][i] + + // assert can not compare NaN, so we need to check it manually + if !(math.IsNaN(expected) && math.IsNaN(v)) { + suite.Require().Equal(expectedColumns[column.Name()][i], v, "column %v does not match at index %v", column.Name(), i) + } + } + } + } +} + +// Regression test for IG-13690 +func (suite *testRawQuerySuite) TestQueryMultiMetricsInconsistentLabels() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels := utils.LabelsFromStringList("os", "linux") + cpuData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + diskioData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{ + tsdbtest.Metric{Name: "cpu", Labels: labels, Data: cpuData}, + tsdbtest.Metric{Name: "diskio", Data: diskioData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{ + Name: "cpu, diskio", + From: suite.basicQueryTime, + To: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, + } + iter, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + for iter.Next() { + suite.NotNil(iter.At(), "Iterator yielded a nil series") + } +} + +func (suite *testRawQuerySuite) TestLoadPartitionsFromAttributes() { + suite.v3ioConfig.LoadPartitionsFromSchemaAttr = true + defer func() { suite.v3ioConfig.LoadPartitionsFromSchemaAttr = false }() + + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + numberOfEvents := 5 + eventsInterval := int64(tsdbtest.MinuteInMillis) + expectedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*eventsInterval, 30}, + {suite.basicQueryTime + 3*eventsInterval, 40}} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: expectedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: expectedData}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", From: suite.basicQueryTime - 8*tsdbtest.DaysInMillis, To: suite.basicQueryTime + int64(numberOfEvents)*eventsInterval} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + for i := 0; i < len(expectedData); i++ { + assert.Equal(suite.T(), expectedData[i].Time, data[i].Time) + currentExpected := expectedData[i].Value + switch val := currentExpected.(type) { + case float64: + assert.Equal(suite.T(), val, data[i].Value) + case int: + assert.Equal(suite.T(), float64(val), data[i].Value) + case string: + assert.Equal(suite.T(), val, data[i].Value) + default: + assert.Error(suite.T(), errors.New("unsupported data type")) + } + } + } + + assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/server_aggregates_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/server_aggregates_integration_test.go new file mode 100644 index 
00000000..4c579fff --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/server_aggregates_integration_test.go @@ -0,0 +1,383 @@ +// +build integration + +package pqueriertest + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testServerAggregatesSuite struct { + basicQueryTestSuite +} + +func TestServerAggregatesSuite(t *testing.T) { + suite.Run(t, new(testServerAggregatesSuite)) +} + +func (suite *testServerAggregatesSuite) TestRawAggregatesSinglePartition() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": {{Time: suite.basicQueryTime - 4*tsdbtest.HoursInMillis, Value: 100}}, + "min": {{Time: suite.basicQueryTime - 4*tsdbtest.HoursInMillis, Value: 10}}, + "max": {{Time: suite.basicQueryTime - 4*tsdbtest.HoursInMillis, Value: 40}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum,max,min", + Step: 4 * tsdbtest.HoursInMillis, + From: suite.basicQueryTime - 4*tsdbtest.HoursInMillis, + To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") +} + +func (suite *testServerAggregatesSuite) TestRawAggregatesSinglePartitionNegativeValues() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, -10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), -20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, -30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, -40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": {{Time: suite.basicQueryTime - 4*tsdbtest.HoursInMillis, Value: -100}}, + "min": {{Time: suite.basicQueryTime - 4*tsdbtest.HoursInMillis, Value: -40}}, + "max": {{Time: suite.basicQueryTime - 4*tsdbtest.HoursInMillis, Value: -10}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum,max,min", + Step: 4 * tsdbtest.HoursInMillis, + From: suite.basicQueryTime - 4*tsdbtest.HoursInMillis, + To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") +} + +func (suite *testServerAggregatesSuite) TestRawAggregatesMultiPartition() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + firstStepTime := suite.basicQueryTime - 7*tsdbtest.DaysInMillis - 1*tsdbtest.HoursInMillis + secondStepTime := suite.basicQueryTime - 1*tsdbtest.HoursInMillis + + expected := map[string][]tsdbtest.DataPoint{ + "sum": {{Time: firstStepTime, Value: 10}, {Time: secondStepTime, Value: 90}}, + "min": {{Time: firstStepTime, Value: 10}, {Time: secondStepTime, Value: 20}}, + "max": {{Time: firstStepTime, Value: 10}, {Time: secondStepTime, Value: 40}}, + "sqr": {{Time: firstStepTime, Value: 100}, {Time: secondStepTime, Value: 2900}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum,max,min,sqr", + Step: 4 * tsdbtest.HoursInMillis, + From: suite.basicQueryTime - 7*tsdbtest.DaysInMillis - 1*tsdbtest.HoursInMillis, + To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testServerAggregatesSuite) TestRawAggregatesMultiPartitionNonConcreteAggregates() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {suite.basicQueryTime - 7*tsdbtest.DaysInMillis + tsdbtest.MinuteInMillis, 12}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + firstStepTime := suite.basicQueryTime - 7*tsdbtest.DaysInMillis - 1*tsdbtest.HoursInMillis + secondStepTime := suite.basicQueryTime - 1*tsdbtest.HoursInMillis + + expected := map[string][]tsdbtest.DataPoint{"avg": {{Time: firstStepTime, Value: 11}, {Time: secondStepTime, Value: 30}}, + "stdvar": {{Time: firstStepTime, Value: 2}, {Time: secondStepTime, Value: 100}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "avg,stdvar", + Step: 4 * tsdbtest.HoursInMillis, + From: suite.basicQueryTime - 7*tsdbtest.DaysInMillis - 1*tsdbtest.HoursInMillis, + To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testServerAggregatesSuite) TestSelectServerAggregatesAndRawByRequestedColumns() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": {{Time: suite.basicQueryTime - 4*tsdbtest.HoursInMillis, Value: 100}}, + "": {{suite.basicQueryTime - 4*tsdbtest.HoursInMillis, 10}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "cpu", Function: "sum"}, {Metric: "cpu", Interpolator: "next_val", InterpolationTolerance: 5 * tsdbtest.HoursInMillis}}, + Step: 4 * tsdbtest.HoursInMillis, + From: suite.basicQueryTime - 4*tsdbtest.HoursInMillis, + To: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis} + + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") +} + +func (suite *testServerAggregatesSuite) TestAggregatesWithDisabledClientAggregation() { + suite.v3ioConfig.DisableClientAggr = true + defer func() { suite.v3ioConfig.DisableClientAggr = false }() + + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime - tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"avg": {{Time: suite.basicQueryTime - tsdbtest.DaysInMillis, Value: 10}, + {Time: suite.basicQueryTime - tsdbtest.HoursInMillis, Value: 30}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", Functions: "avg", From: suite.basicQueryTime - tsdbtest.DaysInMillis, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/variant_type_query_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/variant_type_query_integration_test.go new file mode 100644 index 00000000..9c82557e --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/variant_type_query_integration_test.go @@ -0,0 +1,284 @@ +// +build integration + +package pqueriertest + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testVariantTypeSuite struct { + basicQueryTestSuite +} + +func TestVariantTypeSuite(t *testing.T) { + suite.Run(t, new(testVariantTypeSuite)) +} + +func (suite *testVariantTypeSuite) TestVariantTypeQueryWithDataFrame() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + metricName := "log" + labels := utils.LabelsFromStringList("os", "linux", "__name__", metricName) + + dataToIngest := []string{"a", "b", "c", "d", "e"} + numberOfEvents := len(dataToIngest) + var expectedTimeColumn []int64 + for i := 0; i < numberOfEvents; i++ { + expectedTimeColumn = append(expectedTimeColumn, suite.basicQueryTime+int64(i)*tsdbtest.MinuteInMillis) + } + + appender, err := adapter.Appender() + if err != nil { + suite.T().Fatalf("failed to create v3io appender. reason: %s", err) + } + + ref, err := appender.Add(labels, expectedTimeColumn[0], dataToIngest[0]) + if err != nil { + suite.T().Fatalf("Failed to add data to the TSDB appender. 
Reason: %s", err) + } + for i := 1; i < numberOfEvents; i++ { + appender.AddFast(labels, ref, expectedTimeColumn[i], dataToIngest[i]) + } + + if _, err := appender.WaitForCompletion(0); err != nil { + suite.T().Fatalf("Failed to wait for TSDB append completion. Reason: %s", err) + } + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: metricName}}, + From: suite.basicQueryTime - tsdbtest.DaysInMillis, To: suite.basicQueryTime + tsdbtest.DaysInMillis} + iter, err := querierV2.SelectDataFrame(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + suite.NoError(err) + indexCol := frame.Indices()[0] // in tsdb we have only one index + + for i := 0; i < indexCol.Len(); i++ { + t, _ := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + assert.Equal(suite.T(), expectedTimeColumn[i], timeMillis, "time column does not match at index %v", i) + for _, columnName := range frame.Names() { + column, err := frame.Column(columnName) + suite.NoError(err) + v, _ := column.StringAt(i) + + expected := dataToIngest[i] + + assert.Equal(suite.T(), expected, v, "column %v does not match at index %v", column.Name(), i) + } + } + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testVariantTypeSuite) TestVariantTypeQueryWithSeries() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + metricName := "log" + labels := utils.LabelsFromStringList("os", "linux", "__name__", metricName) + + dataToIngest := []string{"a", "b", "c", "d", "e"} + numberOfEvents := len(dataToIngest) + var expectedTimeColumn []int64 + for i := 0; i < numberOfEvents; i++ { + expectedTimeColumn = append(expectedTimeColumn, suite.basicQueryTime+int64(i)*tsdbtest.MinuteInMillis) + } + + appender, err := adapter.Appender() + if err != nil { + suite.T().Fatalf("failed to create v3io appender. reason: %s", err) + } + + ref, err := appender.Add(labels, expectedTimeColumn[0], dataToIngest[0]) + if err != nil { + suite.T().Fatalf("Failed to add data to the TSDB appender. Reason: %s", err) + } + for i := 1; i < numberOfEvents; i++ { + appender.AddFast(labels, ref, expectedTimeColumn[i], dataToIngest[i]) + } + + if _, err := appender.WaitForCompletion(0); err != nil { + suite.T().Fatalf("Failed to wait for TSDB append completion. 
Reason: %s", err) + } + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: metricName}}, + From: suite.basicQueryTime - tsdbtest.DaysInMillis, To: suite.basicQueryTime + tsdbtest.DaysInMillis} + iter, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + var seriesCount int + for iter.Next() { + seriesCount++ + iter := iter.At().Iterator() + var i int + for iter.Next() { + t, v := iter.AtString() + assert.Equal(suite.T(), expectedTimeColumn[i], t, "time does not match at index %v", i) + assert.Equal(suite.T(), dataToIngest[i], v, "value does not match at index %v", i) + i++ + } + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testVariantTypeSuite) TestCountAggregationForVariantTypeQueryWithSeries() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + metricName := "log" + labels := utils.LabelsFromStringList("os", "linux", "__name__", metricName) + + dataToIngest := []string{"a", "b", "c", "d", "e", "f"} + numberOfEvents := len(dataToIngest) + var expectedTimeColumn []int64 + for i := 0; i < numberOfEvents; i++ { + expectedTimeColumn = append(expectedTimeColumn, suite.basicQueryTime+int64(i)*tsdbtest.MinuteInMillis) + } + + expected := map[string][]tsdbtest.DataPoint{"count": {{Time: suite.basicQueryTime - 5*tsdbtest.MinuteInMillis, Value: numberOfEvents}}} + + appender, err := adapter.Appender() + if err != nil { + suite.T().Fatalf("failed to create v3io appender. reason: %s", err) + } + + ref, err := appender.Add(labels, expectedTimeColumn[0], dataToIngest[0]) + if err != nil { + suite.T().Fatalf("Failed to add data to the TSDB appender. Reason: %s", err) + } + for i := 1; i < numberOfEvents; i++ { + appender.AddFast(labels, ref, expectedTimeColumn[i], dataToIngest[i]) + } + + if _, err := appender.WaitForCompletion(0); err != nil { + suite.T().Fatalf("Failed to wait for TSDB append completion. Reason: %s", err) + } + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{ + From: suite.basicQueryTime - tsdbtest.DaysInMillis, + To: suite.basicQueryTime + tsdbtest.DaysInMillis, + Functions: "count", + Step: 10 * tsdbtest.MinuteInMillis} + + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + labels := set.At().Labels() + agg := labels.Get(aggregate.AggregateLabel) + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testVariantTypeSuite) TestVariantTypeQueryWithSeriesAlotOfData() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + metricName := "log" + labels := utils.LabelsFromStringList("os", "linux", "__name__", metricName) + + numberOfEvents := 1000 + dataToIngest := make([]tsdbtest.DataPoint, numberOfEvents) + for i := 0; i < numberOfEvents; i++ { + dataToIngest[i] = tsdbtest.DataPoint{Time: suite.basicQueryTime + int64(i)*tsdbtest.MinuteInMillis, + Value: fmt.Sprintf("%v", i)} + } + + appender, err := adapter.Appender() + if err != nil { + suite.T().Fatalf("failed to create v3io appender. reason: %s", err) + } + + ref, err := appender.Add(labels, dataToIngest[0].Time, dataToIngest[0].Value) + if err != nil { + suite.T().Fatalf("Failed to add data to the TSDB appender. Reason: %s", err) + } + for i := 1; i < numberOfEvents; i++ { + appender.AddFast(labels, ref, dataToIngest[i].Time, dataToIngest[i].Value) + } + + if _, err := appender.WaitForCompletion(0); err != nil { + suite.T().Fatalf("Failed to wait for TSDB append completion. Reason: %s", err) + } + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: metricName}}, + From: suite.basicQueryTime - tsdbtest.DaysInMillis, To: suite.basicQueryTime + tsdbtest.DaysInMillis} + iter, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + var seriesCount int + for iter.Next() { + seriesCount++ + iter := iter.At().Iterator() + var slice []tsdbtest.DataPoint + for iter.Next() { + t, v := iter.AtString() + slice = append(slice, tsdbtest.DataPoint{Time: t, Value: v}) + } + + suite.Require().Equal(dataToIngest, slice, "number of events mismatch") + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/windowed_aggregation_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/windowed_aggregation_integration_test.go new file mode 100644 index 00000000..be0a304d --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/windowed_aggregation_integration_test.go @@ -0,0 +1,478 @@ +// +build integration + +package pqueriertest + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testWindowAggregationSuite struct { + basicQueryTestSuite +} + +func TestWindowAggregationSuite(t *testing.T) { + suite.Run(t, new(testWindowAggregationSuite)) +} + +func (suite *testWindowAggregationSuite) TestClientWindowedAggregationWindowBiggerThanStep() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + + var ingestedData []tsdbtest.DataPoint + + for i := 0; i < numberOfEvents; i++ { + ingestedData = append(ingestedData, tsdbtest.DataPoint{Time: suite.basicQueryTime + int64(i)*tsdbtest.MinuteInMillis, Value: 10 * float64(i)}) + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": { + {Time: suite.basicQueryTime, Value: 0}, + {Time: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, Value: 150}, + {Time: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis, Value: 390}, + }} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum", + Step: 5 * tsdbtest.MinuteInMillis, + AggregationWindow: 6 * tsdbtest.MinuteInMillis, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testWindowAggregationSuite) TestClientWindowedAggregationWindowSmallerThanStep() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + + var ingestedData []tsdbtest.DataPoint + + for i := 0; i < numberOfEvents; i++ { + ingestedData = append(ingestedData, tsdbtest.DataPoint{Time: suite.basicQueryTime + int64(i)*tsdbtest.MinuteInMillis, Value: 10 * float64(i)}) + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": { + {Time: suite.basicQueryTime, Value: 0}, + {Time: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, Value: 120}, + {Time: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis, Value: 170}, + }} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum", + Step: 5 * tsdbtest.MinuteInMillis, + AggregationWindow: 2 * tsdbtest.MinuteInMillis, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testWindowAggregationSuite) TestClientWindowedAggregationWindowEqualToStep() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + + var ingestedData []tsdbtest.DataPoint + + for i := 0; i < numberOfEvents; i++ { + ingestedData = append(ingestedData, tsdbtest.DataPoint{Time: suite.basicQueryTime + int64(i)*tsdbtest.MinuteInMillis, Value: 10 * float64(i)}) + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": { + {Time: suite.basicQueryTime, Value: 0}, + {Time: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, Value: 150}, + {Time: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis, Value: 300}, + }} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum", + Step: 5 * tsdbtest.MinuteInMillis, + AggregationWindow: 5 * tsdbtest.MinuteInMillis, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testWindowAggregationSuite) TestClientWindowedAggregationWindowExceedsPartition() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + + ingestedData := []tsdbtest.DataPoint{{Time: suite.toMillis("2018-07-19T23:50:00Z"), Value: 1}, + {Time: suite.toMillis("2018-07-19T23:55:00Z"), Value: 2}, + {Time: suite.toMillis("2018-07-19T23:57:00Z"), Value: 3}, + {Time: suite.toMillis("2018-07-20T00:10:00Z"), Value: 4}, + {Time: suite.toMillis("2018-07-20T00:20:00Z"), Value: 5}, + {Time: suite.toMillis("2018-07-20T00:30:00Z"), Value: 6}, + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": { + {Time: suite.toMillis("2018-07-20T00:10:00Z"), Value: 10}, + {Time: suite.toMillis("2018-07-20T00:20:00Z"), Value: 15}, + {Time: suite.toMillis("2018-07-20T00:30:00Z"), Value: 15}, + }} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum", + Step: 10 * tsdbtest.MinuteInMillis, + AggregationWindow: 30 * tsdbtest.MinuteInMillis, + From: suite.toMillis("2018-07-20T00:10:00Z"), + To: suite.toMillis("2018-07-20T00:30:00Z")} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testWindowAggregationSuite) TestServerWindowedAggregationWindowBiggerThanStep() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + + var ingestedData []tsdbtest.DataPoint + + for i := 0; i < numberOfEvents; i++ { + ingestedData = append(ingestedData, tsdbtest.DataPoint{Time: suite.basicQueryTime + int64(i)*tsdbtest.HoursInMillis, Value: 10 * float64(i)}) + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": { + {Time: suite.basicQueryTime, Value: 0}, + {Time: suite.basicQueryTime + 5*tsdbtest.HoursInMillis, Value: 150}, + {Time: suite.basicQueryTime + 10*tsdbtest.HoursInMillis, Value: 350}, + }} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum", + Step: 5 * tsdbtest.HoursInMillis, + AggregationWindow: 6 * tsdbtest.HoursInMillis, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 10*tsdbtest.HoursInMillis} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testWindowAggregationSuite) TestServerWindowedAggregationWindowEqualToStep() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + + var ingestedData []tsdbtest.DataPoint + + for i := 0; i < numberOfEvents; i++ { + ingestedData = append(ingestedData, tsdbtest.DataPoint{Time: suite.basicQueryTime + int64(i)*tsdbtest.HoursInMillis, Value: 10 * float64(i)}) + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": { + {Time: suite.basicQueryTime, Value: 0}, + {Time: suite.basicQueryTime + 5*tsdbtest.HoursInMillis, Value: 150}, + {Time: suite.basicQueryTime + 10*tsdbtest.HoursInMillis, Value: 300}, + }} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum", + Step: 5 * tsdbtest.HoursInMillis, + AggregationWindow: 5 * tsdbtest.HoursInMillis, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 10*tsdbtest.HoursInMillis} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testWindowAggregationSuite) TestServerWindowedAggregationWindowEqualToRollupInterval() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + + var ingestedData []tsdbtest.DataPoint + + for i := 0; i < numberOfEvents; i++ { + ingestedData = append(ingestedData, tsdbtest.DataPoint{Time: suite.basicQueryTime + int64(i)*tsdbtest.HoursInMillis, Value: 10 * float64(i)}) + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": { + {Time: suite.basicQueryTime, Value: 0}, + {Time: suite.basicQueryTime + 1*tsdbtest.HoursInMillis, Value: 10}, + {Time: suite.basicQueryTime + 2*tsdbtest.HoursInMillis, Value: 20}, + {Time: suite.basicQueryTime + 3*tsdbtest.HoursInMillis, Value: 30}, + {Time: suite.basicQueryTime + 4*tsdbtest.HoursInMillis, Value: 40}, + {Time: suite.basicQueryTime + 5*tsdbtest.HoursInMillis, Value: 50}, + {Time: suite.basicQueryTime + 6*tsdbtest.HoursInMillis, Value: 60}, + {Time: suite.basicQueryTime + 7*tsdbtest.HoursInMillis, Value: 70}, + {Time: suite.basicQueryTime + 8*tsdbtest.HoursInMillis, Value: 80}, + {Time: suite.basicQueryTime + 9*tsdbtest.HoursInMillis, Value: 90}, + }} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum", + Step: 1 * tsdbtest.HoursInMillis, + AggregationWindow: 1 * tsdbtest.HoursInMillis, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 10*tsdbtest.HoursInMillis} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/querier.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/querier.go new file mode 100644 index 00000000..573af6a0 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/querier.go @@ -0,0 +1,363 @@ +package pquerier + +import ( + "fmt" + "math" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/nuclio/logger" + "github.com/pkg/errors" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-tsdb/internal/pkg/performance" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/partmgr" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +// Create a new Querier interface +func NewV3ioQuerier(container v3io.Container, logger logger.Logger, + cfg *config.V3ioConfig, partMngr *partmgr.PartitionManager) *V3ioQuerier { + newQuerier := V3ioQuerier{ + container: container, + logger: logger.GetChild("Querier"), + cfg: cfg, + } + newQuerier.partitionMngr = partMngr + newQuerier.performanceReporter = performance.ReporterInstanceFromConfig(cfg) + return &newQuerier +} + +type V3ioQuerier struct { + logger logger.Logger + container v3io.Container + cfg *config.V3ioConfig + partitionMngr *partmgr.PartitionManager + performanceReporter *performance.MetricReporter +} + +type SelectParams 
struct { + Name string + Functions string + From, To, Step int64 + Windows []int + Filter string + RequestedColumns []RequestedColumn + GroupBy string + AggregationWindow int64 + UseOnlyClientAggr bool + + disableAllAggr bool + disableClientAggr bool +} + +func (s *SelectParams) getRequestedColumns() ([]RequestedColumn, error) { + if err := s.validateSelectParams(); err != nil { + return nil, err + } + if s.RequestedColumns != nil { + return s.RequestedColumns, nil + } + functions := strings.Split(s.Functions, ",") + metricNames := strings.Split(s.Name, ",") + columns := make([]RequestedColumn, len(functions)*len(metricNames)) + var index int + for _, metric := range metricNames { + for _, function := range functions { + trimmed := strings.TrimSpace(function) + metricName := strings.TrimSpace(metric) + newCol := RequestedColumn{Function: trimmed, Metric: metricName, Interpolator: defaultInterpolation.String()} + columns[index] = newCol + index++ + } + } + return columns, nil +} + +func (s *SelectParams) validateSelectParams() error { + if s.UseOnlyClientAggr && s.disableClientAggr { + return errors.New("can not query, both `useOnlyClientAggr` and `disableClientAggr` flags are set") + } + + if s.RequestedColumns == nil { + functions := strings.Split(s.Functions, ",") + functionMap := make(map[string]bool, len(functions)) + for _, function := range functions { + trimmed := strings.TrimSpace(function) + if functionMap[trimmed] { + return fmt.Errorf("function '%v' was requested multiple times", trimmed) + } + functionMap[trimmed] = true + } + } else { + functionMap := make(map[string]bool, len(s.RequestedColumns)) + for _, col := range s.RequestedColumns { + trimmed := strings.TrimSpace(col.Function) + key := fmt.Sprintf("%v-%v", col.Metric, trimmed) + if functionMap[key] { + return fmt.Errorf("function '%v' for metric '%v' was requested multiple times", trimmed, col.Metric) + } + functionMap[key] = true + } + } + + return nil +} + +func (q *V3ioQuerier) SelectProm(params *SelectParams, noAggr bool) (utils.SeriesSet, error) { + params.disableAllAggr = noAggr + params.disableClientAggr = q.cfg.DisableClientAggr + iter, err := q.baseSelectQry(params, false) + if err != nil || iter == nil { + return utils.NullSeriesSet{}, err + } + + return iter, nil +} + +// Base query function +func (q *V3ioQuerier) Select(params *SelectParams) (utils.SeriesSet, error) { + params.disableAllAggr = false + params.disableClientAggr = q.cfg.DisableClientAggr + iter, err := q.baseSelectQry(params, true) + if err != nil || iter == nil { + return utils.NullSeriesSet{}, err + } + + return iter, nil +} + +func (q *V3ioQuerier) SelectDataFrame(params *SelectParams) (FrameSet, error) { + params.disableAllAggr = false + params.disableClientAggr = q.cfg.DisableClientAggr + iter, err := q.baseSelectQry(params, true) + if err != nil || iter == nil { + return nullFrameSet{}, err + } + + return iter, nil +} + +func (q *V3ioQuerier) baseSelectQry(params *SelectParams, showAggregateLabel bool) (iter *frameIterator, err error) { + if params.To < params.From { + return nil, errors.Errorf("End time '%d' is lower than start time '%d'.", params.To, params.From) + } + + err = q.partitionMngr.ReadAndUpdateSchema() + if err != nil { + return nil, errors.Wrap(err, "Failed to read/update the TSDB schema.") + } + + // If the config is set to use only client aggregations, override the query parameter. 
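+ // (When UsePreciseAggregations is set in the V3IO config, server-side aggregates are skipped in queryPartition and all aggregation is computed on the client from raw chunks, presumably trading query speed for accuracy.)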
+ if q.cfg.UsePreciseAggregations { + params.UseOnlyClientAggr = true + } + + selectContext := selectQueryContext{ + container: q.container, + logger: q.logger, + workers: q.cfg.QryWorkers, + showAggregateLabel: showAggregateLabel, + v3ioConfig: q.cfg, + } + + q.logger.Debug("Select query:\n\tMetric: %s\n\tStart Time: %s (%d)\n\tEnd Time: %s (%d)\n\tFunction: %s\n\t"+ + "Step: %d\n\tFilter: %s\n\tWindows: %v\n\tDisable All Aggr: %t\n\tDisable Client Aggr: %t", + params.Name, time.Unix(params.From/1000, 0).String(), params.From, time.Unix(params.To/1000, 0).String(), + params.To, params.Functions, params.Step, + params.Filter, params.Windows, params.disableAllAggr, params.disableClientAggr) + + q.performanceReporter.WithTimer("QueryTimer", func() { + params.Filter = strings.Replace(params.Filter, config.PrometheusMetricNameAttribute, config.MetricNameAttrName, -1) + + // Get all partitions containing data relevant to the query. If the Aggregation Window parameter is specified take it in account. + parts := q.partitionMngr.PartsForRange(params.From-params.AggregationWindow, params.To, true) + if len(parts) == 0 { + return + } + + minExistingTime, maxExistingTime := parts[0].GetStartTime(), parts[len(parts)-1].GetEndTime() + if params.From < minExistingTime { + params.From = minExistingTime + } + if params.To > maxExistingTime { + params.To = maxExistingTime + } + + iter, err = selectContext.start(parts, params) + return + }) + + return +} + +// Return the current metric names +func (q *V3ioQuerier) LabelValues(labelKey string) (result []string, err error) { + q.performanceReporter.WithTimer("LabelValuesTimer", func() { + if labelKey == config.PrometheusMetricNameAttribute { + result, err = q.getMetricNames() + } else { + result, err = q.getLabelValues(labelKey) + } + }) + return +} + +// Stub +func (q *V3ioQuerier) LabelNames() ([]string, error) { + return nil, nil +} + +func (q *V3ioQuerier) getMetricNames() ([]string, error) { + input := v3io.GetItemsInput{ + Path: filepath.Join(q.cfg.TablePath, config.NamesDirectory) + "/", // Need a trailing slash + AttributeNames: []string{config.ObjectNameAttrName}, + } + + iter, err := utils.NewAsyncItemsCursor(q.container, &input, q.cfg.QryWorkers, []string{}, q.logger) + if err != nil { + return nil, err + } + + var metricNames []string + + for iter.Next() { + metricNames = append(metricNames, iter.GetField(config.ObjectNameAttrName).(string)) + } + + sort.Sort(sort.StringSlice(metricNames)) + + if iter.Err() != nil { + return nil, fmt.Errorf("failed to read metric names; err = %v", iter.Err().Error()) + } + + return metricNames, nil +} + +func (q *V3ioQuerier) getLabelValues(labelKey string) ([]string, error) { + + // Sync the partition manager (hack) + err := q.partitionMngr.ReadAndUpdateSchema() + if err != nil { + return nil, err + } + + partitionPaths := q.partitionMngr.GetPartitionsPaths() + + // If there are no partitions yet, there are no labels + if len(partitionPaths) == 0 { + return nil, nil + } + + labelValuesMap := map[string]struct{}{} + + // Get all label sets + input := v3io.GetItemsInput{ + Path: partitionPaths[0], + AttributeNames: []string{config.LabelSetAttrName}, + } + + iter, err := utils.NewAsyncItemsCursor(q.container, &input, q.cfg.QryWorkers, []string{}, q.logger) + if err != nil { + return nil, err + } + + // Iterate over the results + for iter.Next() { + labelSet := iter.GetField(config.LabelSetAttrName).(string) + + // For a label set of k1=v1,k2=v2, k2=v3, for labelKey "k2", for example, + // we want to convert the set 
to [v2, v3] + + // Split at "," to get k=v pairs + for _, label := range strings.Split(labelSet, ",") { + + // Split at "=" to get the label key and label value + splitLabel := strings.SplitN(label, "=", 2) + + // If we have two elements and the first element (the key) is equal + // to what we're looking for, save the label value in the map. + // Use a map to prevent duplicates. + if len(splitLabel) == 2 && splitLabel[0] == labelKey { + labelValuesMap[splitLabel[1]] = struct{}{} + } + } + } + + if iter.Err() != nil { + return nil, fmt.Errorf("failed to read label values, err= %v", iter.Err().Error()) + } + + var labelValues []string + for labelValue := range labelValuesMap { + labelValues = append(labelValues, labelValue) + } + + return labelValues, nil +} + +// Returns all unique labels sets we have in the data +func (q *V3ioQuerier) GetLabelSets(metric string, filter string) ([]utils.Labels, error) { + err := q.partitionMngr.ReadAndUpdateSchema() + if err != nil { + return nil, err + } + + partitionPaths := q.partitionMngr.GetPartitionsPaths() + + // If there are no partitions yet, there are no labels + if len(partitionPaths) == 0 { + return nil, nil + } + + var shardingKeys []string + if metric != "" { + shardingKeys = q.partitionMngr.PartsForRange(0, math.MaxInt64, true)[0].GetShardingKeys(metric) + } + + labelsMap := make(map[uint64]utils.Labels) + + // Get all label sets + input := v3io.GetItemsInput{ + Filter: filter, + AttributeNames: []string{config.LabelSetAttrName, config.MetricNameAttrName}, + } + + // Because of performance issues we only want to query the last two partitions + partitionsToQuery := []string{partitionPaths[len(partitionPaths)-1]} + if len(partitionPaths) > 1 { + partitionsToQuery = append(partitionsToQuery, partitionPaths[len(partitionPaths)-2]) + } + iter, err := utils.NewAsyncItemsCursorMultiplePartitions(q.container, &input, q.cfg.QryWorkers, shardingKeys, q.logger, partitionsToQuery) + if err != nil { + return nil, err + } + + // Iterate over the results + for iter.Next() { + labelSet := iter.GetField(config.LabelSetAttrName).(string) + currLabels, err := utils.LabelsFromString(labelSet) + if err != nil { + return nil, err + } + + currLabels = append(utils.LabelsFromStringList(config.PrometheusMetricNameAttribute, + iter.GetField(config.MetricNameAttrName).(string)), currLabels...) 
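+ // Prepending the metric name as a label ensures that identical label sets on different metrics hash to distinct keys in labelsMap.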
+ + labelsMap[currLabels.Hash()] = currLabels + } + + if iter.Err() != nil { + return nil, fmt.Errorf("failed to read label values, err= %v", iter.Err().Error()) + } + + labels := make([]utils.Labels, len(labelsMap)) + var counter int + for _, lset := range labelsMap { + labels[counter] = lset + counter++ + } + return labels, nil +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/select.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/select.go new file mode 100644 index 00000000..4f3cd121 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/select.go @@ -0,0 +1,628 @@ +package pquerier + +import ( + "fmt" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/nuclio/logger" + "github.com/pkg/errors" + "github.com/v3io/frames" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/partmgr" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +const defaultToleranceFactor = 2 + +type selectQueryContext struct { + logger logger.Logger + container v3io.Container + workers int + v3ioConfig *config.V3ioConfig + + queryParams *SelectParams + showAggregateLabel bool + + columnsSpec []columnMeta + columnsSpecByMetric map[string][]columnMeta + totalColumns int + isCrossSeriesAggregate bool + + // In case one of the aggregates of one of the metrics should use client side aggregates + // but the user requested to disable client aggregations - return raw data for every requested metric + forceRawQuery bool + + dataFrames map[uint64]*dataFrame + frameList []*dataFrame + requestChannels []chan *qryResults + errorChannel chan error + wg sync.WaitGroup + createDFLock sync.Mutex + stopChan chan bool + queryWG sync.WaitGroup + finalErrorChan chan error +} + +func (queryCtx *selectQueryContext) start(parts []*partmgr.DBPartition, params *SelectParams) (*frameIterator, error) { + queryCtx.dataFrames = make(map[uint64]*dataFrame) + + queryCtx.queryParams = params + var err error + queryCtx.columnsSpec, queryCtx.columnsSpecByMetric, err = queryCtx.createColumnSpecs() + if err != nil { + return nil, err + } + + // If step isn't passed (e.g., when using the console), the step is the + // difference between the end (maxt) and start (mint) times (e.g., 5 minutes) + if queryCtx.hasAtLeastOneFunction() && params.Step == 0 { + queryCtx.queryParams.Step = params.To - params.From + } + + // We query every partition for every requested metric + queries := make([]*partQuery, len(parts)*len(queryCtx.columnsSpecByMetric)) + + var queryIndex int + for _, part := range parts { + currQueries, err := queryCtx.queryPartition(part) + if err != nil { + return nil, err + } + for _, q := range currQueries { + queries[queryIndex] = q + queryIndex++ + } + } + + queryCtx.stopChan = make(chan bool, 1) + queryCtx.finalErrorChan = make(chan error, 1) + queryCtx.errorChannel = make(chan error, queryCtx.workers+len(queries)) + + err = queryCtx.startCollectors() + if err != nil { + return nil, err + } + + for _, query := range queries { + queryCtx.queryWG.Add(1) + go processQueryResults(queryCtx, query) + } + + queryCtx.queryWG.Wait() + for i := 0; i < queryCtx.workers; i++ { + close(queryCtx.requestChannels[i]) + } + + // wait for Go routines to complete + queryCtx.wg.Wait() + close(queryCtx.errorChannel) + + // return first error + err = <-queryCtx.finalErrorChan + if err != nil { + return nil, err + } + + if 
len(queryCtx.frameList) > 0 { + queryCtx.totalColumns = queryCtx.frameList[0].Len() + } + + return newFrameIterator(queryCtx) +} + +func (queryCtx *selectQueryContext) metricsAggregatesToString(metric string) (string, bool) { + var result strings.Builder + specs := queryCtx.columnsSpecByMetric[metric] + specsNum := len(specs) + if specsNum == 0 { + return "", false + } + + var requestedRawColumn bool + result.WriteString(specs[0].function.String()) + for i := 1; i < specsNum; i++ { + if specs[i].function.String() == "" { + requestedRawColumn = true + } else { + result.WriteString(",") + result.WriteString(specs[i].function.String()) + } + } + + return result.String(), requestedRawColumn && result.Len() > 0 +} + +// Query a single partition +func (queryCtx *selectQueryContext) queryPartition(partition *partmgr.DBPartition) ([]*partQuery, error) { + var queries []*partQuery + var err error + + mint, maxt := partition.GetPartitionRange() + + if queryCtx.queryParams.To < maxt { + maxt = queryCtx.queryParams.To + } + + if queryCtx.queryParams.From > mint { + mint = queryCtx.queryParams.From + } + + queryRawInsteadOfAggregates, doForceAllRawQuery := false, false + var index int + + for metric := range queryCtx.columnsSpecByMetric { + var aggregationParams *aggregate.AggregationParams + functions, requestAggregatesAndRaw := queryCtx.metricsAggregatesToString(metric) + + // Check whether there are aggregations to add and aggregates aren't disabled + if functions != "" && !queryCtx.queryParams.disableAllAggr { + + if queryCtx.queryParams.Step > partition.RollupTime() && queryCtx.queryParams.disableClientAggr { + queryCtx.queryParams.Step = partition.RollupTime() + } + + params, err := aggregate.NewAggregationParams(functions, + "v", + partition.AggrBuckets(), + queryCtx.queryParams.Step, + queryCtx.queryParams.AggregationWindow, + partition.RollupTime(), + queryCtx.queryParams.Windows, + queryCtx.queryParams.disableClientAggr, + queryCtx.v3ioConfig.UseServerAggregateCoefficient) + + if err != nil { + return nil, err + } + aggregationParams = params + + } + + newQuery := &partQuery{mint: mint, + maxt: maxt, + partition: partition, + step: queryCtx.queryParams.Step, + name: metric, + aggregatesAndChunk: requestAggregatesAndRaw} + if aggregationParams != nil { + // Cross series aggregations cannot use server side aggregates. 
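+ // (Functions carrying aggregate.CrossSeriesSuffix are aggregated across all matching series on the client, so the per-series server-side aggregate attributes cannot be used for them.)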
+ newQuery.useServerSideAggregates = aggregationParams.CanAggregate(partition.AggrType()) && + !queryCtx.isCrossSeriesAggregate && + !queryCtx.queryParams.UseOnlyClientAggr + if newQuery.useServerSideAggregates || !queryCtx.queryParams.disableClientAggr { + newQuery.aggregationParams = aggregationParams + } + } + + if newQuery.useServerSideAggregates && !requestAggregatesAndRaw { + newQuery.preAggregateLabels = queryCtx.parsePreAggregateLabels(partition) + } + + queries = append(queries, newQuery) + + currentQueryShouldQueryRawInsteadOfAggregates := !newQuery.useServerSideAggregates && queryCtx.queryParams.disableClientAggr + if len(queryCtx.columnsSpecByMetric) == 1 && currentQueryShouldQueryRawInsteadOfAggregates { + doForceAllRawQuery = true + } else if index == 0 { + queryRawInsteadOfAggregates = currentQueryShouldQueryRawInsteadOfAggregates + } else if queryRawInsteadOfAggregates != currentQueryShouldQueryRawInsteadOfAggregates { + doForceAllRawQuery = true + } + index++ + } + + if doForceAllRawQuery { + queryCtx.forceRawQuery = true + for _, q := range queries { + q.aggregationParams = nil + q.useServerSideAggregates = false + err = q.getItems(queryCtx) + if err != nil { + break + } + } + } else { + for _, q := range queries { + err = q.getItems(queryCtx) + if err != nil { + break + } + } + } + + return queries, err +} + +func (queryCtx *selectQueryContext) parsePreAggregateLabels(partition *partmgr.DBPartition) []string { + if queryCtx.queryParams.GroupBy != "" { + groupByLabelSlice := strings.Split(queryCtx.queryParams.GroupBy, ",") + groupByLabelSet := make(map[string]bool) + for _, groupByLabel := range groupByLabelSlice { + groupByLabelSet[groupByLabel] = true + } + outer: + for _, preAggr := range partition.PreAggregates() { + if len(preAggr.Labels) != len(groupByLabelSet) { + continue + } + for _, label := range preAggr.Labels { + if !groupByLabelSet[label] { + continue outer + } + } + sort.Strings(groupByLabelSlice) + return groupByLabelSlice + } + } + return nil +} + +func (queryCtx *selectQueryContext) startCollectors() error { + + queryCtx.requestChannels = make([]chan *qryResults, queryCtx.workers) + + // Increment the WaitGroup counter. 
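+ // One mainCollector goroutine is started per worker below; queryCtx.wg.Wait() in start() blocks until all of them have drained their request channels.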
+ queryCtx.wg.Add(queryCtx.workers) + + for i := 0; i < queryCtx.workers; i++ { + newChan := make(chan *qryResults, 1000) + queryCtx.requestChannels[i] = newChan + + go func(index int) { + mainCollector(queryCtx, queryCtx.requestChannels[index]) + }(i) + } + + // Watch error channel, and signal all go routines to stop in case of an error + go func() { + // Signal all goroutines to stop when error received + err, ok := <-queryCtx.errorChannel + if ok && err != nil { + close(queryCtx.stopChan) + queryCtx.finalErrorChan <- err + } + + close(queryCtx.finalErrorChan) + return + }() + + return nil +} + +func processQueryResults(queryCtx *selectQueryContext, query *partQuery) { + defer queryCtx.queryWG.Done() + + for query.Next() { + + // read metric name + name, ok := query.GetField(config.MetricNameAttrName).(string) + if !ok { + queryCtx.errorChannel <- fmt.Errorf("could not find metric name attribute in response, res:%v", query.GetFields()) + return + } + + // read label set + lsetAttr, lok := query.GetField(config.LabelSetAttrName).(string) + if !lok { + queryCtx.errorChannel <- fmt.Errorf("could not find label set attribute in response, res:%v", query.GetFields()) + return + } + + lset, err := utils.LabelsFromString(lsetAttr) + if err != nil { + queryCtx.errorChannel <- err + return + } + + // read chunk encoding type + var encoding chunkenc.Encoding + encodingStr, ok := query.GetField(config.EncodingAttrName).(string) + // If we don't have the encoding attribute, use XOR as default. (for backwards compatibility) + if !ok { + encoding = chunkenc.EncXOR + } else { + intEncoding, err := strconv.Atoi(encodingStr) + if err != nil { + queryCtx.errorChannel <- fmt.Errorf("error parsing encoding type of chunk, got: %v, error: %v", encodingStr, err) + return + } + encoding = chunkenc.Encoding(intEncoding) + } + + results := qryResults{name: name, encoding: encoding, query: query, fields: query.GetFields()} + sort.Sort(lset) // maybe skipped if its written sorted + var hash uint64 + + if queryCtx.queryParams.GroupBy != "" { + groupByList := strings.Split(queryCtx.queryParams.GroupBy, ",") + newLset := make(utils.Labels, len(groupByList)) + for i, label := range groupByList { + trimmed := strings.TrimSpace(label) + labelValue := lset.Get(trimmed) + if labelValue != "" { + newLset[i] = utils.Label{Name: trimmed, Value: labelValue} + } else { + queryCtx.errorChannel <- fmt.Errorf("no label named %v found to group by", trimmed) + return + } + } + lset = newLset + hash = newLset.Hash() + } else if queryCtx.isCrossSeriesAggregate { + hash = uint64(0) + lset = utils.Labels{} + } else { + hash = lset.Hash() + } + + queryCtx.createDFLock.Lock() + // find or create data frame + frame, ok := queryCtx.dataFrames[hash] + if !ok { + var err error + frame, err = newDataFrame(queryCtx.columnsSpec, + queryCtx.getOrCreateTimeColumn(), + lset, + hash, + queryCtx.isRawQuery(), + queryCtx.getResultBucketsSize(), + results.IsServerAggregates(), + queryCtx.showAggregateLabel) + if err != nil { + queryCtx.errorChannel <- err + queryCtx.createDFLock.Unlock() + return + } + queryCtx.dataFrames[hash] = frame + queryCtx.frameList = append(queryCtx.frameList, frame) + } + queryCtx.createDFLock.Unlock() + + results.frame = frame + workerNum := hash & uint64(queryCtx.workers-1) + + // In case termination signal was received exit, Otherwise send query result to worker + select { + case _ = <-queryCtx.stopChan: + return + case queryCtx.requestChannels[workerNum] <- &results: + } + + } + + if query.Err() != nil { + 
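+ // Forward iterator errors to the shared error channel; the watcher goroutine in startCollectors closes stopChan and reports the first error through finalErrorChan.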
queryCtx.errorChannel <- query.Err() + } +} + +func (queryCtx *selectQueryContext) createColumnSpecs() ([]columnMeta, map[string][]columnMeta, error) { + var columnsSpec []columnMeta + columnsSpecByMetric := make(map[string][]columnMeta) + requestedColumns, err := queryCtx.queryParams.getRequestedColumns() + if err != nil { + return nil, nil, err + } + + for i, col := range requestedColumns { + _, ok := columnsSpecByMetric[col.Metric] + if !ok { + columnsSpecByMetric[col.Metric] = []columnMeta{} + } + + inter, err := StrToInterpolateType(col.Interpolator) + if err != nil { + return nil, nil, err + } + + tolerance := col.InterpolationTolerance + if tolerance == 0 { + tolerance = queryCtx.queryParams.Step * defaultToleranceFactor + } + colMeta := columnMeta{metric: col.Metric, alias: col.Alias, interpolationType: inter, interpolationTolerance: tolerance} + + if col.GetFunction() != "" { + // validating that all given aggregates are either cross series or not + if col.isCrossSeries() { + if i > 0 && !queryCtx.isCrossSeriesAggregate { + return nil, nil, fmt.Errorf("can not aggregate both over time and across series aggregates") + } + queryCtx.isCrossSeriesAggregate = true + } else if queryCtx.isCrossSeriesAggregate { + return nil, nil, fmt.Errorf("can not aggregate both over time and across series aggregates") + } + aggr, err := aggregate.FromString(col.GetFunction()) + if err != nil { + return nil, nil, err + } + colMeta.function = aggr + } + columnsSpecByMetric[col.Metric] = append(columnsSpecByMetric[col.Metric], colMeta) + columnsSpec = append(columnsSpec, colMeta) + } + + // Adding hidden columns if needed + for metric, cols := range columnsSpecByMetric { + var aggregatesMask aggregate.AggrType + var aggregates []aggregate.AggrType + var metricInterpolationType InterpolationType + var metricInterpolationTolerance int64 + for _, colSpec := range cols { + aggregatesMask |= colSpec.function + aggregates = append(aggregates, colSpec.function) + + if metricInterpolationType == 0 { + if colSpec.interpolationType != 0 { + metricInterpolationType = colSpec.interpolationType + metricInterpolationTolerance = colSpec.interpolationTolerance + } + } else if colSpec.interpolationType != 0 && colSpec.interpolationType != metricInterpolationType { + return nil, nil, fmt.Errorf("multiple interpolation for the same metric are not supported, got %v and %v", + metricInterpolationType.String(), + colSpec.interpolationType.String()) + } else if metricInterpolationTolerance != colSpec.interpolationTolerance { + return nil, nil, fmt.Errorf("different interpolation tolerances for the same metric are not supported, got %v and %v", + metricInterpolationTolerance, + colSpec.interpolationTolerance) + } + } + + // Add hidden aggregates only if there the user specified aggregations + if aggregatesMask != 0 { + hiddenColumns := aggregate.GetHiddenAggregatesWithCount(aggregatesMask, aggregates) + for _, hiddenAggr := range hiddenColumns { + hiddenCol := columnMeta{metric: metric, function: hiddenAggr, isHidden: true} + columnsSpec = append(columnsSpec, hiddenCol) + columnsSpecByMetric[metric] = append(columnsSpecByMetric[metric], hiddenCol) + } + } + + // After creating all columns set their interpolation function + for i := 0; i < len(columnsSpecByMetric[metric]); i++ { + columnsSpecByMetric[metric][i].interpolationType = metricInterpolationType + columnsSpecByMetric[metric][i].interpolationTolerance = metricInterpolationTolerance + } + for i, col := range columnsSpec { + if col.metric == metric { + 
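+ // Mirror the resolved interpolation settings onto the flat columnsSpec slice so it stays consistent with columnsSpecByMetric.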
columnsSpec[i].interpolationType = metricInterpolationType + columnsSpec[i].interpolationTolerance = metricInterpolationTolerance + } + } + } + + if len(columnsSpec) == 0 { + return nil, nil, errors.Errorf("no Columns were specified for query: %v", queryCtx.queryParams) + } + return columnsSpec, columnsSpecByMetric, nil +} + +func (queryCtx *selectQueryContext) getOrCreateTimeColumn() Column { + // When querying for raw data we don't need to generate a time column since we return the raw time + if queryCtx.isRawQuery() { + return nil + } + + return queryCtx.generateTimeColumn() +} + +func (queryCtx *selectQueryContext) generateTimeColumn() Column { + columnMeta := columnMeta{metric: "time"} + timeColumn := newDataColumn("time", columnMeta, queryCtx.getResultBucketsSize(), frames.TimeType) + i := 0 + for t := queryCtx.queryParams.From; t <= queryCtx.queryParams.To; t += queryCtx.queryParams.Step { + err := timeColumn.SetDataAt(i, time.Unix(t/1000, (t%1000)*1e6)) + if err != nil { + queryCtx.logger.ErrorWith(errors.Wrap(err, fmt.Sprintf("could not set data"))) + } else { + i++ + } + } + return timeColumn +} + +func (queryCtx *selectQueryContext) isRawQuery() bool { + return (!queryCtx.hasAtLeastOneFunction() && queryCtx.queryParams.Step == 0) || + queryCtx.queryParams.disableAllAggr || + queryCtx.forceRawQuery +} + +func (queryCtx *selectQueryContext) hasAtLeastOneFunction() bool { + atLeastOneFunction := false + for _, col := range queryCtx.columnsSpec { + if col.function != 0 { + atLeastOneFunction = true + break + } + } + return atLeastOneFunction +} + +func (queryCtx *selectQueryContext) getResultBucketsSize() int { + if queryCtx.isRawQuery() { + return 0 + } + return int((queryCtx.queryParams.To-queryCtx.queryParams.From)/queryCtx.queryParams.Step + 1) +} + +// query object for a single partition (or name and partition in future optimizations) + +type partQuery struct { + partition *partmgr.DBPartition + iter utils.ItemsCursor + partIndex int + + baseTime int64 + mint, maxt int64 + attrs []string + step int64 + + chunk0Time int64 + chunkTime int64 + useServerSideAggregates bool + aggregationParams *aggregate.AggregationParams + + name string + preAggregateLabels []string + aggregatesAndChunk bool +} + +func (query *partQuery) getItems(ctx *selectQueryContext) error { + + path := query.partition.GetTablePath() + if len(query.preAggregateLabels) > 0 { + path = fmt.Sprintf("%sagg/%s/", path, strings.Join(query.preAggregateLabels, ",")) + } + + var shardingKeys []string + if query.name != "" { + shardingKeys = query.partition.GetShardingKeys(query.name) + } + attrs := []string{config.LabelSetAttrName, config.EncodingAttrName, config.MetricNameAttrName, config.MaxTimeAttrName, config.ObjectNameAttrName} + + if query.useServerSideAggregates { + query.attrs = query.aggregationParams.GetAttrNames() + } + // It is possible to request both server aggregates and raw chunk data (to downsample) for the same metric + // example: `select max(cpu), avg(cpu), cpu` with step = 1h + if !query.useServerSideAggregates || query.aggregatesAndChunk { + chunkAttr, chunk0Time := query.partition.Range2Attrs("v", query.mint-ctx.queryParams.AggregationWindow, query.maxt) + query.chunk0Time = chunk0Time + query.attrs = append(query.attrs, chunkAttr...) + } + attrs = append(attrs, query.attrs...) 
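+ // attrs now holds the fixed metadata attributes plus the server-side aggregate attributes, the raw chunk attributes, or both, depending on the flags above.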
+ + ctx.logger.DebugWith("Select - GetItems", "path", path, "attr", attrs, "filter", ctx.queryParams.Filter, "name", query.name) + input := v3io.GetItemsInput{Path: path, AttributeNames: attrs, Filter: ctx.queryParams.Filter, ShardingKey: query.name} + iter, err := utils.NewAsyncItemsCursor(ctx.container, &input, ctx.workers, shardingKeys, ctx.logger) + if err != nil { + return err + } + + query.iter = iter + return nil +} + +func (query *partQuery) Next() bool { + var res bool + + res = query.iter.Next() + return res +} + +func (query *partQuery) GetField(name string) interface{} { + return query.iter.GetField(name) +} + +func (query *partQuery) GetFields() map[string]interface{} { + return query.iter.GetFields() +} + +func (query *partQuery) Err() error { + return query.iter.Err() +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/selectQueryContext_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/selectQueryContext_test.go new file mode 100644 index 00000000..0a58d205 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/selectQueryContext_test.go @@ -0,0 +1,168 @@ +// +build unit + +package pquerier + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/v3io/v3io-tsdb/pkg/aggregate" +) + +func TestCreateColumnSpecs(t *testing.T) { + testCases := []struct { + desc string + params SelectParams + expectedSpecs []columnMeta + expectedSpecsMap map[string][]columnMeta + }{ + {params: SelectParams{Name: "cpu"}, + expectedSpecs: []columnMeta{{metric: "cpu", interpolationType: interpolateNext}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", interpolationType: interpolateNext}}}}, + + {params: SelectParams{Name: "cpu", Functions: "count"}, + expectedSpecs: []columnMeta{{metric: "cpu", function: toAggr("count"), interpolationType: interpolateNext}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("count"), interpolationType: interpolateNext}}}}, + + {params: SelectParams{Name: "cpu", Functions: "avg"}, + expectedSpecs: []columnMeta{{metric: "cpu", function: toAggr("avg"), interpolationType: interpolateNext}, + {metric: "cpu", function: toAggr("count"), isHidden: true, interpolationType: interpolateNext}, + {metric: "cpu", function: toAggr("sum"), isHidden: true, interpolationType: interpolateNext}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("avg"), interpolationType: interpolateNext}, + {metric: "cpu", function: toAggr("count"), isHidden: true, interpolationType: interpolateNext}, + {metric: "cpu", function: toAggr("sum"), isHidden: true, interpolationType: interpolateNext}}}}, + + {params: SelectParams{Name: "cpu", Functions: "avg,count"}, + expectedSpecs: []columnMeta{{metric: "cpu", function: toAggr("avg"), interpolationType: interpolateNext}, + {metric: "cpu", function: toAggr("count"), interpolationType: interpolateNext}, + {metric: "cpu", function: toAggr("sum"), isHidden: true, interpolationType: interpolateNext}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("avg"), interpolationType: interpolateNext}, + {metric: "cpu", function: toAggr("count"), interpolationType: interpolateNext}, + {metric: "cpu", function: toAggr("sum"), isHidden: true, interpolationType: interpolateNext}}}}, + + {params: SelectParams{RequestedColumns: []RequestedColumn{{Metric: "cpu", Function: "count"}}}, + expectedSpecs: []columnMeta{{metric: "cpu", function: 
toAggr("count")}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("count")}}}}, + + {params: SelectParams{RequestedColumns: []RequestedColumn{{Metric: "cpu", Function: "count"}, + {Metric: "disk", Function: "count"}}}, + expectedSpecs: []columnMeta{{metric: "cpu", function: toAggr("count")}, {metric: "disk", function: toAggr("count")}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("count")}}, + "disk": {{metric: "disk", function: toAggr("count")}}}}, + + {params: SelectParams{RequestedColumns: []RequestedColumn{{Metric: "cpu", Function: "avg"}, + {Metric: "cpu", Function: "sum"}, + {Metric: "disk", Function: "count"}}}, + expectedSpecs: []columnMeta{{metric: "cpu", function: toAggr("avg")}, + {metric: "cpu", function: toAggr("sum")}, + {metric: "cpu", function: toAggr("count"), isHidden: true}, + {metric: "disk", function: toAggr("count")}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("avg")}, + {metric: "cpu", function: toAggr("sum")}, + {metric: "cpu", function: toAggr("count"), isHidden: true}}, + "disk": {{metric: "disk", function: toAggr("count")}}}}, + + {params: SelectParams{Name: "cpu,diskio"}, + expectedSpecs: []columnMeta{{metric: "cpu", interpolationType: interpolateNext}, + {metric: "diskio", interpolationType: interpolateNext}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", interpolationType: interpolateNext}}, + "diskio": {{metric: "diskio", interpolationType: interpolateNext}}}}, + + {params: SelectParams{Name: "cpu, diskio", Functions: "sum,count"}, + expectedSpecs: []columnMeta{{metric: "cpu", function: toAggr("count"), interpolationType: interpolateNext}, + {metric: "cpu", function: toAggr("sum"), interpolationType: interpolateNext}, + {metric: "diskio", function: toAggr("count"), interpolationType: interpolateNext}, + {metric: "diskio", function: toAggr("sum"), interpolationType: interpolateNext}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("sum"), interpolationType: interpolateNext}, + {metric: "cpu", function: toAggr("count"), interpolationType: interpolateNext}}, + "diskio": {{metric: "diskio", function: toAggr("sum"), interpolationType: interpolateNext}, + {metric: "diskio", function: toAggr("count"), interpolationType: interpolateNext}}}}, + + {params: SelectParams{RequestedColumns: []RequestedColumn{{Metric: "cpu", Function: "sum", Interpolator: "linear"}, + {Metric: "cpu", Function: "count", Interpolator: "linear"}}}, + expectedSpecs: []columnMeta{{metric: "cpu", function: toAggr("sum"), interpolationType: interpolateLinear}, + {metric: "cpu", function: toAggr("count"), interpolationType: interpolateLinear}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("sum"), interpolationType: interpolateLinear}, + {metric: "cpu", function: toAggr("count"), interpolationType: interpolateLinear}}}}, + + {params: SelectParams{RequestedColumns: []RequestedColumn{{Metric: "cpu", Function: "sum", Interpolator: "linear"}, + {Metric: "cpu", Function: "count"}}}, + expectedSpecs: []columnMeta{{metric: "cpu", function: toAggr("sum"), interpolationType: interpolateLinear}, + {metric: "cpu", function: toAggr("count"), interpolationType: interpolateLinear}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("sum"), interpolationType: interpolateLinear}, + {metric: "cpu", function: toAggr("count"), interpolationType: 
interpolateLinear}}}}, + + {params: SelectParams{RequestedColumns: []RequestedColumn{{Metric: "cpu", Function: "avg", Interpolator: "linear"}, + {Metric: "cpu", Function: "count"}}}, + expectedSpecs: []columnMeta{{metric: "cpu", function: toAggr("avg"), interpolationType: interpolateLinear}, + {metric: "cpu", function: toAggr("sum"), interpolationType: interpolateLinear, isHidden: true}, + {metric: "cpu", function: toAggr("count"), interpolationType: interpolateLinear}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("avg"), interpolationType: interpolateLinear}, + {metric: "cpu", function: toAggr("count"), interpolationType: interpolateLinear}, + {metric: "cpu", function: toAggr("sum"), interpolationType: interpolateLinear, isHidden: true}}}}, + + {params: SelectParams{RequestedColumns: []RequestedColumn{{Metric: "cpu", Function: "count", Interpolator: "linear"}, + {Metric: "diskio", Function: "count", Interpolator: "prev_val"}, + {Metric: "diskio", Function: "sum"}}}, + expectedSpecs: []columnMeta{{metric: "cpu", function: toAggr("count"), interpolationType: interpolateLinear}, + {metric: "diskio", function: toAggr("count"), interpolationType: interpolatePrev}, + {metric: "diskio", function: toAggr("sum"), interpolationType: interpolatePrev}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("count"), interpolationType: interpolateLinear}}, + "diskio": { + {metric: "diskio", function: toAggr("count"), interpolationType: interpolatePrev}, + {metric: "diskio", function: toAggr("sum"), interpolationType: interpolatePrev}}}}, + } + for _, test := range testCases { + t.Run(test.desc, func(t *testing.T) { + ctx := selectQueryContext{} + ctx.queryParams = &test.params + columnsSpec, columnsSpecByMetric, err := ctx.createColumnSpecs() + + if err != nil { + t.Fatal(err) + } + assert.ElementsMatch(t, test.expectedSpecs, columnsSpec) + assert.Equal(t, test.expectedSpecsMap, columnsSpecByMetric) + }) + } +} + +func TestNegativeCreateColumnSpecs(t *testing.T) { + testCases := []struct { + desc string + params SelectParams + }{ + {params: SelectParams{Name: "cpu", Functions: "count, count"}}, + + {params: SelectParams{Name: "cpu", Functions: "count, max,count"}}, + + {params: SelectParams{RequestedColumns: []RequestedColumn{{Metric: "cpu", Function: "count"}, + {Metric: "cpu", Function: "count"}}}}, + + {params: SelectParams{RequestedColumns: []RequestedColumn{{Metric: "cpu", Function: "count"}, + {Metric: "diskio", Function: "count"}, + {Metric: "cpu", Function: "count"}}}}, + + {params: SelectParams{RequestedColumns: []RequestedColumn{{Metric: "cpu", Function: "count"}, + {Metric: "diskio", Function: "count"}, + {Metric: "cpu", Function: " count "}}}}, + + {params: SelectParams{Name: "cpu", Functions: "count, count", UseOnlyClientAggr: true, disableClientAggr: true}}, + } + for _, test := range testCases { + t.Run(test.desc, func(t *testing.T) { + ctx := selectQueryContext{} + ctx.queryParams = &test.params + _, _, err := ctx.createColumnSpecs() + + if err == nil { + t.Fatal("expected error but finished normally") + } + }) + } +} + +func toAggr(str string) aggregate.AggrType { + aggr, _ := aggregate.FromString(str) + return aggr +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/series.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/series.go new file mode 100644 index 00000000..dbc67aa5 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/series.go 
@@ -0,0 +1,140 @@ +package pquerier + +import ( + "math" + "time" + + "github.com/v3io/frames" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +func NewDataFrameColumnSeries(indexColumn, dataColumn, countColumn Column, labels utils.Labels, hash uint64, showAggregateLabel bool) *DataFrameColumnSeries { + // If we need to return the Aggregate label then add it, otherwise (for example in prometheus) return labels without it + aggString := dataColumn.GetColumnSpec().function.String() + if showAggregateLabel && aggString != "" { + labels = append(labels, utils.LabelsFromStringList(aggregate.AggregateLabel, aggString)...) + } + + wantedMetricName := dataColumn.GetColumnSpec().alias + if wantedMetricName == "" { + wantedMetricName = dataColumn.GetColumnSpec().metric + } + + // The labels we get from the Dataframe are agnostic to the metric name, since there might be several metrics in one Dataframe + labels = append(labels, utils.LabelsFromStringList(config.PrometheusMetricNameAttribute, wantedMetricName)...) + s := &DataFrameColumnSeries{labels: labels, key: hash} + s.iter = &dataFrameColumnSeriesIterator{indexColumn: indexColumn, dataColumn: dataColumn, countColumn: countColumn, currentIndex: -1} + return s +} + +// This series converts two columns into a series of time-value pairs +type DataFrameColumnSeries struct { + labels utils.Labels + key uint64 + iter utils.SeriesIterator +} + +func (s *DataFrameColumnSeries) Labels() utils.Labels { + return s.labels +} +func (s *DataFrameColumnSeries) Iterator() utils.SeriesIterator { return s.iter } +func (s *DataFrameColumnSeries) GetKey() uint64 { return s.key } + +type dataFrameColumnSeriesIterator struct { + dataColumn Column + indexColumn Column + countColumn Column // Count Column is needed to filter out empty buckets + + currentIndex int + err error +} + +func (it *dataFrameColumnSeriesIterator) Seek(seekT int64) bool { + if it.currentIndex >= it.dataColumn.Len() { + return false + } + t, _ := it.At() + if t >= seekT { + return true + } + + for it.Next() { + t, _ := it.At() + if t >= seekT { + return true + } + } + + return false +} + +func (it *dataFrameColumnSeriesIterator) At() (int64, float64) { + t, err := it.indexColumn.TimeAt(it.currentIndex) + if err != nil { + it.err = err + } + v, err := it.dataColumn.FloatAt(it.currentIndex) + if err != nil { + it.err = err + } + return t.UnixNano() / int64(time.Millisecond), v +} + +func (it *dataFrameColumnSeriesIterator) AtString() (int64, string) { + t, err := it.indexColumn.TimeAt(it.currentIndex) + if err != nil { + it.err = err + } + v, err := it.dataColumn.StringAt(it.currentIndex) + if err != nil { + it.err = err + } + return t.UnixNano() / int64(time.Millisecond), v +} + +func (it *dataFrameColumnSeriesIterator) Next() bool { + if it.err != nil { + return false + } + it.currentIndex = it.getNextValidCell(it.currentIndex) + + // It is enough to only check one of the columns since we assume they are both the same size + return it.currentIndex < it.indexColumn.Len() +} + +func (it *dataFrameColumnSeriesIterator) Err() error { return it.err } + +func (it *dataFrameColumnSeriesIterator) Encoding() chunkenc.Encoding { + enc := chunkenc.EncXOR + if it.dataColumn.DType() == frames.StringType { + enc = chunkenc.EncVariant + } + return enc +} + +func (it *dataFrameColumnSeriesIterator) getNextValidCell(from int) (nextIndex int) { + for nextIndex = from + 1; nextIndex < 
it.dataColumn.Len() && !it.doesCellHasData(nextIndex); nextIndex++ { + } + return +} + +func (it *dataFrameColumnSeriesIterator) doesCellHasData(cell int) bool { + // In case we don't have a count column (for example while down sampling) check if there is a real value at `cell` + if it.countColumn == nil { + f, err := it.dataColumn.FloatAt(cell) + if err != nil { + it.err = err + return false + } + return !math.IsNaN(f) + } + val, err := it.countColumn.FloatAt(cell) + if err != nil { + it.err = err + return false + } + return val > 0 +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/sql_parser.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/sql_parser.go new file mode 100644 index 00000000..ccb5aefd --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/sql_parser.go @@ -0,0 +1,179 @@ +package pquerier + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + "github.com/v3io/v3io-tsdb/pkg/utils" + "github.com/xwb1989/sqlparser" +) + +const emptyTableName = "dual" + +// ParseQuery Parses an sql query into `tsdb.selectParams` +// Currently supported syntax: +// select - selecting multiple metrics, aggregations, interpolation functions and aliasing +// from - only one table +// where - equality, and range operators. Not supporting regex,`IS NULL`, etc.. +// group by +func ParseQuery(sql string) (*SelectParams, string, error) { + stmt, err := sqlparser.Parse(sql) + if err != nil { + return nil, "", err + } + slct, ok := stmt.(*sqlparser.Select) + if !ok { + return nil, "", fmt.Errorf("not a SELECT statement") + } + + fromTable, err := getTableName(slct) + if err != nil { + return nil, "", err + } + + selectParams := &SelectParams{} + var columns []RequestedColumn + + for _, sexpr := range slct.SelectExprs { + currCol := RequestedColumn{} + switch col := sexpr.(type) { + case *sqlparser.AliasedExpr: + if !col.As.IsEmpty() { + currCol.Alias = col.As.String() + } + + switch expr := col.Expr.(type) { + case *sqlparser.FuncExpr: + err := parseFuncExpr(expr, &currCol) + if err != nil { + return nil, "", err + } + case *sqlparser.ColName: + currCol.Metric = removeBackticks(sqlparser.String(expr.Name)) + default: + return nil, "", fmt.Errorf("unknown columns type - %T", col.Expr) + } + columns = append(columns, currCol) + case *sqlparser.StarExpr: + // Appending empty column, meaning a column template for raw data + columns = append(columns, currCol) + default: + return nil, "", fmt.Errorf("unknown SELECT column type - %T", sexpr) + } + } + if len(columns) == 0 { + return nil, "", fmt.Errorf("no columns") + } + selectParams.RequestedColumns = columns + + if slct.Where != nil { + selectParams.Filter, _ = parseFilter(strings.TrimPrefix(sqlparser.String(slct.Where), " where ")) + } + if slct.GroupBy != nil { + selectParams.GroupBy = strings.TrimPrefix(sqlparser.String(slct.GroupBy), " group by ") + } + + err = validateColumnNames(selectParams) + if err != nil { + return nil, "", err + } + + return selectParams, fromTable, nil +} + +func parseFuncExpr(expr *sqlparser.FuncExpr, destCol *RequestedColumn) error { + possibleInterpolator := removeBackticks(sqlparser.String(expr.Name)) + if _, err := StrToInterpolateType(possibleInterpolator); err == nil { + destCol.Interpolator = possibleInterpolator + numOfParameters := len(expr.Exprs) + if numOfParameters == 1 { + collName := expr.Exprs[0].(*sqlparser.AliasedExpr).Expr.(*sqlparser.ColName) + destCol.Metric = sqlparser.String(collName) + } else if numOfParameters == 2 { + 
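+ // Two arguments: interpolator(metric, 'tolerance'), e.g. prev_val(columnA, '1h'); the second argument sets the interpolation tolerance.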
collName := expr.Exprs[0].(*sqlparser.AliasedExpr).Expr.(*sqlparser.ColName) + destCol.Metric = sqlparser.String(collName) + toleranceVal := expr.Exprs[1].(*sqlparser.AliasedExpr).Expr.(*sqlparser.SQLVal) + toleranceString := sqlparser.String(toleranceVal) + + // SQLVal cannot start with a number so it has to be surrounded with ticks. + // Stripping ticks + tolerance, err := utils.Str2duration(toleranceString[1 : len(toleranceString)-1]) + if err != nil { + return err + } + destCol.InterpolationTolerance = tolerance + } else { + return fmt.Errorf("unssoported number of parameters for function %v", possibleInterpolator) + } + } else { + destCol.Function = sqlparser.String(expr.Name) + + switch firstExpr := expr.Exprs[0].(type) { + case *sqlparser.AliasedExpr: + switch innerExpr := firstExpr.Expr.(type) { + case *sqlparser.ColName: + destCol.Metric = sqlparser.String(innerExpr.Name) + case *sqlparser.FuncExpr: + err := parseFuncExpr(innerExpr, destCol) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("could not parse expr")) + } + } + } + + if destCol.Metric == "" && destCol.Alias != "" { + return errors.New("cannot alias a wildcard") + } + } + + return nil +} + +func getTableName(slct *sqlparser.Select) (string, error) { + if nTables := len(slct.From); nTables != 1 { + return "", fmt.Errorf("select from multiple tables is not supported (got %d)", nTables) + } + aliased, ok := slct.From[0].(*sqlparser.AliasedTableExpr) + if !ok { + return "", fmt.Errorf("not a table select") + } + table, ok := aliased.Expr.(sqlparser.TableName) + if !ok { + return "", fmt.Errorf("not a table in FROM field") + } + + tableStr := table.Name.String() + if tableStr == emptyTableName { + return "", nil + } + return tableStr, nil +} +func parseFilter(originalFilter string) (string, error) { + return strings.Replace(originalFilter, " = ", " == ", -1), nil +} +func removeBackticks(origin string) string { + return strings.Replace(origin, "`", "", -1) +} + +func validateColumnNames(params *SelectParams) error { + names := make(map[string]bool) + requestedMetrics := make(map[string]bool) + + for _, column := range params.RequestedColumns { + columnName := column.GetColumnName() + if names[columnName] { + return fmt.Errorf("column name '%v' appears more than once in select query", columnName) + } + names[columnName] = true + requestedMetrics[column.Metric] = true + } + + for _, column := range params.RequestedColumns { + if column.Alias != "" && requestedMetrics[column.Alias] { + return fmt.Errorf("cannot use a metric name as an alias, alias: %v", column.Alias) + } + } + + return nil +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/sql_parser_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/sql_parser_test.go new file mode 100644 index 00000000..82c228a7 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/sql_parser_test.go @@ -0,0 +1,101 @@ +// +build unit + +package pquerier_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" +) + +func TestParseQuery(t *testing.T) { + testCases := []struct { + input string + output *pquerier.SelectParams + outputTable string + }{ + {input: "select columnA, columnB", + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "columnA"}, {Metric: "columnB"}}}}, + + {input: "select linear(columnA, '10m')", + output: &pquerier.SelectParams{RequestedColumns: 
[]pquerier.RequestedColumn{{Metric: "columnA", + Interpolator: "linear", + InterpolationTolerance: 10 * tsdbtest.MinuteInMillis}}}}, + + {input: "select max(prev_val(columnA)), avg(columnB)", + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "columnA", Interpolator: "prev_val", Function: "max"}, + {Metric: "columnB", Function: "avg"}}}}, + + {input: "select max(next_val(columnA)), avg(columnB)", + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "columnA", Interpolator: "next_val", Function: "max"}, + {Metric: "columnB", Function: "avg"}}}}, + + {input: "select max(prev_val(columnA, '1h')) as ahsheli, avg(columnB)", + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "columnA", + Interpolator: "prev_val", + Function: "max", + Alias: "ahsheli", + InterpolationTolerance: tsdbtest.HoursInMillis}, + {Metric: "columnB", Function: "avg"}}}}, + + {input: "select columnA where columnB = 'tal' and columnC < 'Neiman'", + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "columnA"}}, Filter: "columnB == 'tal' and columnC < 'Neiman'"}}, + + {input: "select max(columnA) group by columnB", + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "columnA", Function: "max"}}, GroupBy: "columnB"}}, + + {input: "select min(columnA) as bambi, max(linear(columnB)) as bimba where columnB >= 123 group by columnB,columnC ", + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "columnA", Function: "min", Alias: "bambi"}, + {Metric: "columnB", Function: "max", Interpolator: "linear", Alias: "bimba"}}, + Filter: "columnB >= 123", GroupBy: "columnB, columnC"}}, + + {input: "select min(columnA) from my_table where columnB >= 123", + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "columnA", Function: "min"}}, + Filter: "columnB >= 123"}, + outputTable: "my_table"}, + + {input: "select * from my_table", + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: ""}}}, + outputTable: "my_table"}, + + {input: `select * from 'my/table'`, + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: ""}}}, + outputTable: "my/table"}, + + {input: "select max(*), avg(*) from my_table", + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "", Function: "max"}, {Metric: "", Function: "avg"}}}, + outputTable: "my_table"}, + } + for _, test := range testCases { + t.Run(test.input, func(tt *testing.T) { + queryParams, table, err := pquerier.ParseQuery(test.input) + if err != nil { + tt.Fatal(err) + } + + assert.Equal(tt, test.output, queryParams) + assert.Equal(tt, test.outputTable, table) + }) + } +} + +func TestNegativeParseQuery(t *testing.T) { + testCases := []struct { + input string + }{ + {input: "select columnA as something, columnB as something"}, + {input: "select avg(columnA) as something, columnB as something"}, + {input: "select avg(*) as something"}, + {input: "select avg(cpu), max(cpu) as cpu"}, + } + for _, test := range testCases { + t.Run(test.input, func(tt *testing.T) { + _, _, err := pquerier.ParseQuery(test.input) + if err == nil { + tt.Fatalf("expected error but finished successfully") + } + }) + } +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/types.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/types.go new file mode 100644 
index 00000000..e3e9b7be --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/types.go @@ -0,0 +1,105 @@ +package pquerier + +import ( + "fmt" + "strings" + + "github.com/v3io/frames" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" +) + +// data and metadata passed to the query processor workers via a channel +type qryResults struct { + frame *dataFrame + query *partQuery + name string + fields map[string]interface{} + encoding chunkenc.Encoding +} + +func (q *qryResults) IsRawQuery() bool { return q.frame.isRawSeries } + +func (q *qryResults) IsDownsample() bool { + _, ok := q.frame.columnByName[q.name] + + return ok && q.query.step != 0 +} + +func (q *qryResults) IsServerAggregates() bool { + return q.query.aggregationParams != nil && q.query.useServerSideAggregates +} + +func (q *qryResults) IsClientAggregates() bool { + return q.query.aggregationParams != nil && !q.query.useServerSideAggregates +} + +type RequestedColumn struct { + Metric string + Alias string + Function string + Interpolator string + InterpolationTolerance int64 // tolerance in Millis +} + +func (col *RequestedColumn) isCrossSeries() bool { + return strings.HasSuffix(col.Function, aggregate.CrossSeriesSuffix) +} + +// If the function is cross series, remove the suffix otherwise leave it as is +func (col *RequestedColumn) GetFunction() string { + return strings.TrimSuffix(col.Function, aggregate.CrossSeriesSuffix) +} + +func (col *RequestedColumn) GetColumnName() string { + if col.Alias != "" { + return col.Alias + } + // If no aggregations are requested (raw down sampled data) + if col.Function == "" { + return col.Metric + } + return fmt.Sprintf("%v(%v)", col.Function, col.Metric) +} + +type columnMeta struct { + metric string + alias string + function aggregate.AggrType + functionParams []interface{} + interpolationType InterpolationType + interpolationTolerance int64 + isHidden bool // real columns = columns the user has specifically requested. Hidden columns = columns needed to calculate the real columns but don't show to the user +} + +// if a user specifies he wants all metrics +func (c *columnMeta) isWildcard() bool { return c.metric == "" } + +// Concrete Column = has real data behind it, Virtual column = described as a function on top of concrete columns +func (c columnMeta) isConcrete() bool { return c.function == 0 || aggregate.IsRawAggregate(c.function) } +func (c columnMeta) getColumnName() string { + if c.alias != "" { + return c.alias + } + // If no aggregations are requested (raw down sampled data) + if c.function == 0 { + return c.metric + } + return fmt.Sprintf("%v(%v)", c.function.String(), c.metric) +} + +// SeriesSet contains a set of series. 
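+// FrameSet is the frame-based counterpart: it iterates over result data frames rather than individual series.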
+type FrameSet interface { + NextFrame() bool + GetFrame() (frames.Frame, error) + Err() error +} + +// Null-frame set +type nullFrameSet struct { + err error +} + +func (s nullFrameSet) NextFrame() bool { return false } +func (s nullFrameSet) GetFrame() (frames.Frame, error) { return nil, nil } +func (s nullFrameSet) Err() error { return s.err } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/multipart.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/multipart.go new file mode 100644 index 00000000..b6a2befe --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/multipart.go @@ -0,0 +1,195 @@ +package querier + +import ( + "sort" + + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +func NewSetSorter(set utils.SeriesSet) (utils.SeriesSet, error) { + sorter := setSorter{} + sorter.set = set + sorter.index = -1 + + for set.Next() { + s := set.At() + i := sort.Search(len(sorter.list), func(i int) bool { return sorter.list[i].GetKey() >= s.GetKey() }) + sorter.list = append(sorter.list, nil) + copy(sorter.list[i+1:], sorter.list[i:]) + sorter.list[i] = s + } + if set.Err() != nil { + sorter.err = set.Err() + return nil, set.Err() + } + + return &sorter, nil +} + +type setSorter struct { + set utils.SeriesSet + list []utils.Series + index int + err error +} + +func (s *setSorter) Next() bool { + if s.index >= len(s.list)-1 { + return false + } + s.index++ + return true +} + +func (s *setSorter) At() utils.Series { + return s.list[s.index] +} + +func (s *setSorter) Err() error { return s.err } + +type IterSortMerger struct { + iters []utils.SeriesSet + done []bool + currKey uint64 + currInvalids []bool + currSeries []utils.Series + err error +} + +// Merge-sort multiple SeriesSets +func newIterSortMerger(sets []utils.SeriesSet) (utils.SeriesSet, error) { + newMerger := IterSortMerger{} + newMerger.iters = sets + newMerger.done = make([]bool, len(sets)) + newMerger.currInvalids = make([]bool, len(sets)) + return &newMerger, nil +} + +func (im *IterSortMerger) Next() bool { + + completed := true + keyIsSet := false + for i, iter := range im.iters { + if !im.currInvalids[i] { + im.done[i] = !iter.Next() + if iter.Err() != nil { + im.err = iter.Err() + return false + } + } + completed = completed && im.done[i] + if !im.done[i] { + key := iter.At().GetKey() + if !keyIsSet { + im.currKey = key + keyIsSet = true + } else if key < im.currKey { + im.currKey = key + } + } + } + + if completed { + return false + } + + im.currSeries = make([]utils.Series, 0, len(im.iters)) + for i, iter := range im.iters { + im.currInvalids[i] = true + if !im.done[i] { + if iter.At().GetKey() == im.currKey { + im.currInvalids[i] = false + im.currSeries = append(im.currSeries, iter.At()) + } + } + } + + return true +} + +// Return the current key and a list of iterators containing this key +func (im *IterSortMerger) At() utils.Series { + newSeries := mergedSeries{series: im.currSeries} + return &newSeries +} + +func (im *IterSortMerger) Err() error { + return im.err +} + +type mergedSeries struct { + series []utils.Series +} + +func (m *mergedSeries) Labels() utils.Labels { + return m.series[0].Labels() +} + +func (m *mergedSeries) Iterator() utils.SeriesIterator { + return newMergedSeriesIterator(m.series...) 
+} + +func (m *mergedSeries) GetKey() uint64 { + return m.series[0].GetKey() +} + +type mergedSeriesIterator struct { + series []utils.Series + i int + cur utils.SeriesIterator +} + +func newMergedSeriesIterator(s ...utils.Series) *mergedSeriesIterator { + return &mergedSeriesIterator{ + series: s, + i: 0, + cur: s[0].Iterator(), + } +} + +func (it *mergedSeriesIterator) Seek(t int64) bool { + // We just scan the merge series sequentially, as they are already + // pre-selected by time and should be accessed sequentially anyway. + for i, s := range it.series[it.i:] { + cur := s.Iterator() + if !cur.Seek(t) { + continue + } + it.cur = cur + it.i += i + return true + } + return false +} + +func (it *mergedSeriesIterator) Next() bool { + if it.cur.Next() { + return true + } + if err := it.cur.Err(); err != nil { + return false + } + if it.i == len(it.series)-1 { + return false + } + + it.i++ + it.cur = it.series[it.i].Iterator() + + return it.Next() +} + +func (it *mergedSeriesIterator) At() (t int64, v float64) { + return it.cur.At() +} + +func (it *mergedSeriesIterator) AtString() (t int64, v string) { return it.cur.AtString() } + +func (it *mergedSeriesIterator) Err() error { + return it.cur.Err() +} + +func (it *mergedSeriesIterator) Encoding() chunkenc.Encoding { + return chunkenc.EncXOR +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/multipart_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/multipart_test.go new file mode 100644 index 00000000..bd77a960 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/multipart_test.go @@ -0,0 +1,73 @@ +// +build unit + +package querier + +import ( + "testing" + + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testIterSortMergerSuite struct { + suite.Suite +} + +type mockSeriesSet struct { + s []utils.Series + init bool +} + +func (m *mockSeriesSet) Next() bool { + if !m.init { + m.init = true + } else if len(m.s) > 1 { + m.s = m.s[1:] + } else { + return false + } + return true +} + +func (m *mockSeriesSet) At() utils.Series { + return m.s[0] +} + +func (m *mockSeriesSet) Err() error { + return nil +} + +type stubSeries uint64 + +func (stubSeries) Labels() utils.Labels { + panic("stub") +} + +func (stubSeries) Iterator() utils.SeriesIterator { + panic("stub") +} + +func (s stubSeries) GetKey() uint64 { + return uint64(s) +} + +func (suite *testIterSortMergerSuite) TestIterSortMerger() { + + s1 := []utils.Series{stubSeries(0), stubSeries(1)} + s2 := []utils.Series{stubSeries(2), stubSeries(3)} + iter, err := newIterSortMerger([]utils.SeriesSet{&mockSeriesSet{s: s1}, &mockSeriesSet{s: s2}}) + + suite.Require().Nil(err) + suite.Require().True(iter.Next()) + suite.Require().Equal(uint64(0), iter.At().GetKey()) + suite.Require().True(iter.Next()) + suite.Require().Equal(uint64(1), iter.At().GetKey()) + suite.Require().True(iter.Next()) + suite.Require().Equal(uint64(2), iter.At().GetKey()) + suite.Require().True(iter.Next()) + suite.Require().Equal(uint64(3), iter.At().GetKey()) +} + +func TestIterSortMergerSuiteSuite(t *testing.T) { + suite.Run(t, new(testIterSortMergerSuite)) +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/querier.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/querier.go new file mode 100644 index 00000000..94879789 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/querier.go @@ -0,0 +1,363 @@ +/* +Copyright 2018 Iguazio Systems Ltd. 
+ +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package querier + +import ( + "sort" + "strings" + "time" + + "github.com/nuclio/logger" + "github.com/pkg/errors" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-tsdb/internal/pkg/performance" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/partmgr" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +// Create a new Querier interface +func NewV3ioQuerier(container v3io.Container, logger logger.Logger, mint, maxt int64, + cfg *config.V3ioConfig, partMngr *partmgr.PartitionManager) *V3ioQuerier { + newQuerier := V3ioQuerier{ + container: container, + mint: mint, maxt: maxt, + logger: logger.GetChild("Querier"), + cfg: cfg, + } + newQuerier.partitionMngr = partMngr + newQuerier.performanceReporter = performance.ReporterInstanceFromConfig(cfg) + return &newQuerier +} + +type V3ioQuerier struct { + logger logger.Logger + container v3io.Container + cfg *config.V3ioConfig + mint, maxt int64 + partitionMngr *partmgr.PartitionManager + performanceReporter *performance.MetricReporter +} + +type SelectParams struct { + Name string + Functions string + Step int64 + Windows []int + Filter string + + disableAllAggr bool + disableClientAggr bool +} + +// Standard Time Series Query, return a set of series which match the condition +func (q *V3ioQuerier) Select(name, functions string, step int64, filter string) (utils.SeriesSet, error) { + + return q.selectQry(&SelectParams{ + Name: name, + Functions: functions, + Step: step, + Filter: filter, + disableClientAggr: q.cfg.DisableClientAggr, + }) + +} + +// Prometheus time-series query - return a set of time series that match the +// specified conditions +func (q *V3ioQuerier) SelectProm(name, functions string, step int64, filter string, noAggr bool) (utils.SeriesSet, error) { + + return q.selectQry(&SelectParams{ + Name: name, + Functions: functions, + Step: step, + Filter: filter, + disableClientAggr: true, + disableAllAggr: noAggr, + }) +} + +// Overlapping windows time-series query - return a set of series each with a +// list of aggregated results per window +// For example, get the last 1h, 6h, and 24h stats per metric (specify a 1h +// aggregation interval (step) of 3600*1000 (=1h), windows 1, 6, and 24, and an +// end (max) time). 
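As a hedged usage sketch of the overlapping-windows call described above (the values mirror the 1h/6h/24h example; qry is assumed to be a *V3ioQuerier created with NewV3ioQuerier, and error handling is abbreviated):

// Illustrative only; not part of the original source.
step := int64(3600 * 1000)  // 1h aggregation interval
windows := []int{1, 6, 24}  // last 1h, 6h, and 24h per metric
set, err := qry.SelectOverlap("cpu", "avg,max", step, windows, "os == 'linux'")
if err != nil {
	return err
}
for set.Next() {
	series := set.At() // one aggregated value per requested window
	_ = series.Labels()
}
if set.Err() != nil {
	return set.Err()
}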
+func (q *V3ioQuerier) SelectOverlap(name, functions string, step int64, windows []int, filter string) (utils.SeriesSet, error) { + sort.Sort(sort.Reverse(sort.IntSlice(windows))) + + return q.selectQry(&SelectParams{ + Name: name, + Functions: functions, + Step: step, + Filter: filter, + Windows: windows, + disableClientAggr: q.cfg.DisableClientAggr, + }) +} + +// Base query function +func (q *V3ioQuerier) selectQry(params *SelectParams) (set utils.SeriesSet, err error) { + + err = q.partitionMngr.ReadAndUpdateSchema() + if err != nil { + return utils.NullSeriesSet{}, errors.Wrap(err, "Failed to read/update the TSDB schema.") + } + + set = utils.NullSeriesSet{} + + q.logger.Debug("Select query:\n\tMetric: %s\n\tStart Time: %s (%d)\n\tEnd Time: %s (%d)\n\tFunction: %s\n\t"+ + "Step: %d\n\tFilter: %s\n\tWindows: %v\n\tDisable All Aggr: %t\n\tDisable Client Aggr: %t", + params.Name, time.Unix(q.mint/1000, 0).String(), q.mint, time.Unix(q.maxt/1000, 0).String(), + q.maxt, params.Functions, params.Step, + params.Filter, params.Windows, params.disableAllAggr, params.disableClientAggr) + + q.performanceReporter.WithTimer("QueryTimer", func() { + params.Filter = strings.Replace(params.Filter, "__name__", "_name", -1) + + parts := q.partitionMngr.PartsForRange(q.mint, q.maxt, true) + if len(parts) == 0 { + return + } + + if len(parts) == 1 { + set, err = q.queryNumericPartition(parts[0], params) + return + } + + sets := make([]utils.SeriesSet, len(parts)) + for i, part := range parts { + set, err = q.queryNumericPartition(part, params) + if err != nil { + set = utils.NullSeriesSet{} + return + } + sets[i] = set + } + + // Sort each partition + /* TODO: Removed condition that applies sorting only on non range scan queries to fix bug with series coming OOO when querying multi partitions, + Need to think of a better solution. + */ + for i := 0; i < len(sets); i++ { + // TODO make it a Go routine per part + sorter, error := NewSetSorter(sets[i]) + if error != nil { + set = utils.NullSeriesSet{} + err = error + return + } + sets[i] = sorter + } + + set, err = newIterSortMerger(sets) + return + }) + + return +} + +// Query a single partition (with integer or float values) +func (q *V3ioQuerier) queryNumericPartition(partition *partmgr.DBPartition, params *SelectParams) (utils.SeriesSet, error) { + + mint, maxt := partition.GetPartitionRange() + step := params.Step + + if q.maxt < maxt { + maxt = q.maxt + } + + if q.mint > mint { + mint = q.mint + if step != 0 && step < (maxt-mint) { + // Temporary aggregation fix: if mint isn't aligned with the step, + // move it to the next step tick + mint += (maxt - mint) % step + } + } + + newSet := &V3ioSeriesSet{mint: mint, maxt: maxt, partition: partition, logger: q.logger} + + // If there are no aggregation functions and the aggregation-interval (step) + // size is greater than the stored aggregate, use the Average aggregate. + // TODO: When not using the Prometheus TSDB, we may want an avg aggregate + // for any step>0 in the Prometheus range vectors using seek, and it would + // be inefficient to use an avg aggregate. 
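	// Editor's note (illustrative, not part of the original source): with a
	// partition RollupTime() of 1h, a query that passes no functions and a 4h
	// step takes the branch below and reads the pre-aggregated "avg" arrays;
	// with step=0 the effective step becomes maxt-mint (one bucket for the
	// whole range), and when client-side aggregation is disabled a step larger
	// than the rollup interval is clamped down to RollupTime().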
+ functions := params.Functions + if functions == "" && step > 0 && step >= partition.RollupTime() && partition.AggrType().HasAverage() { + functions = "avg" + } + + // Check whether there are aggregations to add and aggregates aren't disabled + if functions != "" && !params.disableAllAggr { + + // If step isn't passed (e.g., when using the console), the step is the + // difference between the end (maxt) and start (mint) times (e.g., 5 minutes) + if step == 0 { + step = maxt - mint + } + + if step > partition.RollupTime() && params.disableClientAggr { + step = partition.RollupTime() + } + + newAggrSeries, err := aggregate.NewAggregateSeries(functions, + "v", + partition.AggrBuckets(), + step, + partition.RollupTime(), + params.Windows) + + if err != nil { + return nil, err + } + + // Use aggregates if possible on the TSDB side or if client aggregation + // is enabled (Prometheus is disabled on the client side) + newSet.canAggregate = newAggrSeries.CanAggregate(partition.AggrType()) + if newSet.canAggregate || !params.disableClientAggr { + newSet.aggrSeries = newAggrSeries + newSet.interval = step + newSet.aggrIdx = newAggrSeries.NumFunctions() - 1 + newSet.overlapWin = params.Windows + newSet.noAggrLbl = params.disableClientAggr // Don't add an "Aggregate" label in Prometheus (see aggregate.AggregateLabel) + } + } + + err := newSet.getItems(partition, params.Name, params.Filter, q.container, q.cfg.QryWorkers) + + return newSet, err +} + +// Return the current metric names +func (q *V3ioQuerier) LabelValues(labelKey string) (result []string, err error) { + q.performanceReporter.WithTimer("LabelValuesTimer", func() { + if labelKey == "__name__" { + result, err = q.getMetricNames() + } else { + result, err = q.getLabelValues(labelKey) + } + }) + return +} + +func (q *V3ioQuerier) LabelNames() ([]string, error) { + return nil, nil +} + +func (q *V3ioQuerier) Close() error { + return nil +} + +func (q *V3ioQuerier) getMetricNames() ([]string, error) { + input := v3io.GetItemsInput{ + Path: q.cfg.TablePath + "/names/", + AttributeNames: []string{"__name"}, + } + + iter, err := utils.NewAsyncItemsCursor(q.container, &input, q.cfg.QryWorkers, []string{}, q.logger) + if err != nil { + return nil, err + } + + var metricNames []string + + for iter.Next() { + metricNames = append(metricNames, iter.GetField("__name").(string)) + } + + sort.Sort(sort.StringSlice(metricNames)) + + if iter.Err() != nil { + q.logger.InfoWith("Failed to read metric names; returning an empty list.", "err", iter.Err().Error()) + } + + return metricNames, nil +} + +func (q *V3ioQuerier) getLabelValues(labelKey string) ([]string, error) { + + // Sync the partition manager (hack) + err := q.partitionMngr.ReadAndUpdateSchema() + if err != nil { + return nil, err + } + + partitionPaths := q.partitionMngr.GetPartitionsPaths() + var numPartitions = len(partitionPaths) + + // If there are no partitions yet, there are no labels + if numPartitions == 0 { + return nil, nil + } + + //take the last FULL partition (unless there is only 1-2 partitions) + var partitionIndex = numPartitions - 1 + if numPartitions > 2 { + partitionIndex-- + } + + labelValuesMap := map[string]struct{}{} + + // Get all label sets + input := v3io.GetItemsInput{ + Path: partitionPaths[partitionIndex], + AttributeNames: []string{"_lset"}, + } + + iter, err := utils.NewAsyncItemsCursor(q.container, &input, q.cfg.QryWorkers, []string{}, q.logger) + if err != nil { + return nil, err + } + + // Iterate over the results + for iter.Next() { + labelSet := 
iter.GetField("_lset").(string) + + // For a label set of k1=v1,k2=v2, k2=v3, for labelKey "k2", for example, + // we want to convert the set to [v2, v3] + + // Split at "," to get k=v pairs + for _, label := range strings.Split(labelSet, ",") { + + // Split at "=" to get the label key and label value + splitLabel := strings.SplitN(label, "=", 2) + + // If we have two elements and the first element (the key) is equal + // to what we're looking for, save the label value in the map. + // Use a map to prevent duplicates. + if len(splitLabel) == 2 && splitLabel[0] == labelKey { + labelValuesMap[splitLabel[1]] = struct{}{} + } + } + } + + if iter.Err() != nil { + q.logger.InfoWith("Failed to read label values, returning empty list", "err", iter.Err().Error()) + } + + var labelValues []string + for labelValue := range labelValuesMap { + labelValues = append(labelValues, labelValue) + } + + return labelValues, nil +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/series.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/series.go new file mode 100644 index 00000000..6d821855 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/series.go @@ -0,0 +1,310 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. 
+*/ + +package querier + +import ( + "strings" + + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +// Create a new series from chunks +func NewSeries(set *V3ioSeriesSet) utils.Series { + newSeries := V3ioSeries{set: set} + newSeries.lset = initLabels(set) + newSeries.initSeriesIter() + return &newSeries +} + +type V3ioSeries struct { + set *V3ioSeriesSet + lset utils.Labels + iter utils.SeriesIterator + hash uint64 +} + +func (s *V3ioSeries) Labels() utils.Labels { return s.lset } + +// Get the unique series key for sorting +func (s *V3ioSeries) GetKey() uint64 { + if s.hash == 0 { + val, err := s.lset.HashWithMetricName() + if err != nil { + s.set.logger.Error(err) + return 0 + } + s.hash = val + + } + return s.hash +} + +func (s *V3ioSeries) Iterator() utils.SeriesIterator { return s.iter } + +// Initialize the label set from _lset and _name attributes +func initLabels(set *V3ioSeriesSet) utils.Labels { + name, nok := set.iter.GetField("_name").(string) + if !nok { + name = "UNKNOWN" + } + lsetAttr, lok := set.iter.GetField("_lset").(string) + if !lok { + lsetAttr = "UNKNOWN" + } + if !lok || !nok { + set.logger.Error("Error in initLabels; bad field values.") + } + + lset := utils.Labels{utils.Label{Name: "__name__", Value: name}} + + splitLset := strings.Split(lsetAttr, ",") + for _, label := range splitLset { + kv := strings.Split(label, "=") + if len(kv) > 1 { + lset = append(lset, utils.Label{Name: kv[0], Value: kv[1]}) + } + } + + return lset +} + +// Initialize the series from values, metadata, and attributes +func (s *V3ioSeries) initSeriesIter() { + + maxt := s.set.maxt + maxTime := s.set.iter.GetField(config.MaxTimeAttrName) + if maxTime != nil && int64(maxTime.(int)) < maxt { + maxt = int64(maxTime.(int)) + } + + newIterator := v3ioSeriesIterator{ + mint: s.set.mint, maxt: maxt} + newIterator.chunks = []chunkenc.Chunk{} + newIterator.chunksMax = []int64{} + + // Create and initialize a chunk encoder per chunk blob + for i, attr := range s.set.attrs { + values := s.set.iter.GetField(attr) + + if values != nil { + bytes := values.([]byte) + chunk, err := chunkenc.FromData(s.set.logger, chunkenc.EncXOR, bytes, 0) + if err != nil { + s.set.logger.ErrorWith("Error reading chunk buffer", "Lset", s.lset, "err", err) + } else { + newIterator.chunks = append(newIterator.chunks, chunk) + newIterator.chunksMax = append(newIterator.chunksMax, + s.set.chunk0Time+int64(i+1)*s.set.partition.TimePerChunk()-1) + } + } + + } + + if len(newIterator.chunks) == 0 { + // If there's no data, create a null iterator + s.iter = &utils.NullSeriesIterator{} + } else { + newIterator.iter = newIterator.chunks[0].Iterator() + s.iter = &newIterator + } +} + +// Chunk-list series iterator +type v3ioSeriesIterator struct { + mint, maxt int64 // TBD per block + err error + + chunks []chunkenc.Chunk + + chunkIndex int + chunksMax []int64 + iter chunkenc.Iterator +} + +// Advance the iterator to the specified chunk and time +func (it *v3ioSeriesIterator) Seek(t int64) bool { + + // Seek time is after the item's end time (maxt) + if t > it.maxt { + return false + } + + // Seek to the first valid value after t + if t < it.mint { + t = it.mint + } + + // Check the first element + t0, _ := it.iter.At() + if t0 > it.maxt { + return false + } + if t <= t0 { + return true + } + + for { + if it.iter.Next() { + t0, _ := it.iter.At() + if t0 > it.maxt { + return false + } + if t > 
it.chunksMax[it.chunkIndex] { + // This chunk is too far behind; move to the next chunk or + // Return false if it's the last chunk + if it.chunkIndex == len(it.chunks)-1 { + return false + } + it.chunkIndex++ + it.iter = it.chunks[it.chunkIndex].Iterator() + } else if t <= t0 { + // The cursor (t0) is either on t or just passed t + return true + } + } else { + // End of chunk; move to the next chunk or return if last + if it.chunkIndex == len(it.chunks)-1 { + return false + } + it.chunkIndex++ + it.iter = it.chunks[it.chunkIndex].Iterator() + } + } +} + +// Move to the next iterator item +func (it *v3ioSeriesIterator) Next() bool { + if it.iter.Next() { + t, _ := it.iter.At() + if t < it.mint { + if !it.Seek(it.mint) { + return false + } + t, _ = it.At() + + return t <= it.maxt + } + if t <= it.maxt { + return true + } + return false + } + + if err := it.iter.Err(); err != nil { + return false + } + if it.chunkIndex == len(it.chunks)-1 { + return false + } + + it.chunkIndex++ + it.iter = it.chunks[it.chunkIndex].Iterator() + return it.Next() +} + +// Read the time and value at the current location +func (it *v3ioSeriesIterator) At() (t int64, v float64) { return it.iter.At() } + +func (it *v3ioSeriesIterator) AtString() (t int64, v string) { return it.iter.AtString() } + +func (it *v3ioSeriesIterator) Err() error { return it.iter.Err() } + +func (it *v3ioSeriesIterator) Encoding() chunkenc.Encoding { return chunkenc.EncXOR } + +// Aggregates (count, avg, sum, ..) series and iterator + +func NewAggrSeries(set *V3ioSeriesSet, aggr aggregate.AggrType) *V3ioSeries { + newSeries := V3ioSeries{set: set} + lset := initLabels(set) + if !set.noAggrLbl { + lset = append(lset, utils.Label{Name: aggregate.AggregateLabel, Value: aggr.String()}) + } + newSeries.lset = lset + + if set.nullSeries { + newSeries.iter = &utils.NullSeriesIterator{} + } else { + + // `set` - the iterator "iterates" over stateful data - it holds a + // "current" set and aggrSet. This requires copying all the required + // stateful data into the iterator (e.g., aggrSet) so that when it's + // evaluated it will hold the proper pointer. 
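	// Editor's note (illustrative, not part of the original source): because the
	// aggregation buckets have a fixed width, the aggrSeriesIterator constructed
	// below maps a timestamp directly to a cell index in Seek, e.g. with
	// baseTime at 12:00 and a 10-minute interval, Seek(12:35) computes
	// int(35m/10m) = 3, i.e. the 12:30-12:40 bucket.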
+ newSeries.iter = &aggrSeriesIterator{ + set: set, + aggrSet: set.aggrSet, + aggrType: aggr, + index: -1, + } + } + + return &newSeries +} + +type aggrSeriesIterator struct { + set *V3ioSeriesSet + aggrSet *aggregate.Set + aggrType aggregate.AggrType + index int + err error +} + +// Advance an iterator to the specified time (t) +func (s *aggrSeriesIterator) Seek(t int64) bool { + if t <= s.set.baseTime { + s.index = s.getNextValidCell(-1) + return true + } + + if t > s.set.baseTime+int64(s.aggrSet.GetMaxCell())*s.set.interval { + return false + } + + s.index = int((t - s.set.baseTime) / s.set.interval) + return true +} + +// Advance an iterator to the next time interval/bucket +func (s *aggrSeriesIterator) Next() bool { + // Advance the index to the next non-empty cell + s.index = s.getNextValidCell(s.index) + return s.index <= s.aggrSet.GetMaxCell() +} + +func (s *aggrSeriesIterator) getNextValidCell(from int) (nextIndex int) { + for nextIndex = from + 1; nextIndex <= s.aggrSet.GetMaxCell() && !s.aggrSet.HasData(nextIndex); nextIndex++ { + } + return +} + +// Return the time and value at the current bucket +func (s *aggrSeriesIterator) At() (t int64, v float64) { + val, _ := s.aggrSet.GetCellValue(s.aggrType, s.index) + return s.aggrSet.GetCellTime(s.set.baseTime, s.index), val +} + +func (s *aggrSeriesIterator) AtString() (t int64, v string) { return 0, "" } + +func (s *aggrSeriesIterator) Encoding() chunkenc.Encoding { return chunkenc.EncXOR } + +func (s *aggrSeriesIterator) Err() error { return s.err } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/seriesset.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/seriesset.go new file mode 100644 index 00000000..ab2bbbb4 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/seriesset.go @@ -0,0 +1,235 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. 
+*/ + +package querier + +import ( + "github.com/nuclio/logger" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/partmgr" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +// holds the query result set +type V3ioSeriesSet struct { + err error + logger logger.Logger + partition *partmgr.DBPartition + iter utils.ItemsCursor + mint, maxt int64 + attrs []string + chunk0Time int64 + + interval int64 + nullSeries bool + overlapWin []int + aggrSeries *aggregate.Series + aggrIdx int + canAggregate bool + currSeries utils.Series + aggrSet *aggregate.Set + noAggrLbl bool + baseTime int64 +} + +// Get relevant items and attributes from the TSDB and create an iterator +// TODO: get items per partition + merge, per partition calc attrs +func (s *V3ioSeriesSet) getItems(partition *partmgr.DBPartition, name, filter string, container v3io.Container, workers int) error { + + path := partition.GetTablePath() + shardingKeys := []string{} + if name != "" { + shardingKeys = partition.GetShardingKeys(name) + } + attrs := []string{config.LabelSetAttrName, config.OutOfOrderAttrName, "_name", config.MaxTimeAttrName} + + if s.aggrSeries != nil && s.canAggregate { + s.attrs = s.aggrSeries.GetAttrNames() + } else { + s.attrs, s.chunk0Time = s.partition.Range2Attrs("v", s.mint, s.maxt) + } + attrs = append(attrs, s.attrs...) + + s.logger.DebugWith("Select - GetItems", "path", path, "attr", attrs, "filter", filter, "name", name) + input := v3io.GetItemsInput{Path: path, AttributeNames: attrs, Filter: filter, ShardingKey: name} + iter, err := utils.NewAsyncItemsCursor(container, &input, workers, shardingKeys, s.logger) + if err != nil { + return err + } + + s.iter = iter + return nil + +} + +// Advance to the next series +func (s *V3ioSeriesSet) Next() bool { + + // Create a raw-chunks series (not aggregated) + if s.aggrSeries == nil { + if s.iter.Next() { + s.currSeries = NewSeries(s) + return true + } + return false + } + + // Create multiple aggregation series (one per aggregation function). + // The index is initialized as numfunc-1 (so the first +1 and modulo will equal 0). 
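	// Editor's note (illustrative, not part of the original source): with three
	// requested functions, aggrIdx starts at 2 and cycles 2 -> 0 -> 1 -> 2 ...;
	// a new item is read from the GetItems cursor only when the index is back at
	// NumFunctions()-1, so each underlying metric item yields one aggregated
	// series per requested function.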
+ if s.aggrIdx == s.aggrSeries.NumFunctions()-1 { + // If there are no more items (from GetItems cursor), return with EOF + if !s.iter.Next() { + return false + } + + s.nullSeries = false + + if s.canAggregate { + + // Create a series from aggregation arrays (in the TSDB table) if + // the partition stores the desired aggregates + maxtUpdate := s.maxt + maxTime := s.iter.GetField(config.MaxTimeAttrName) + if maxTime != nil && int64(maxTime.(int)) < s.maxt { + maxtUpdate = int64(maxTime.(int)) + } + + start := s.partition.Time2Bucket(s.mint) + end := s.partition.Time2Bucket(s.maxt+s.interval) + 1 + + // Calculate the length of the returned array: time-range/interval + 2 + length := int((maxtUpdate-s.mint)/s.interval) + 2 + + if s.overlapWin != nil { + s.baseTime = s.maxt + } else { + s.baseTime = s.mint + } + + if length > 0 { + attrs := s.iter.GetFields() + aggrSet, err := s.aggrSeries.NewSetFromAttrs(length, start, end, s.mint, s.maxt, &attrs) + if err != nil { + s.err = err + return false + } + + s.aggrSet = aggrSet + } else { + s.nullSeries = true + } + + } else { + + // Create a series from raw chunks + s.currSeries = NewSeries(s) + + // Calculate the number of cells: (maxt-mint)/interval + 1 + numCells := (s.maxt-s.mint)/s.interval + 1 + + s.aggrSet = s.aggrSeries.NewSetFromChunks(int(numCells)) + if s.overlapWin != nil { + s.chunks2WindowedAggregates() + } else { + s.chunks2IntervalAggregates() + } + + } + } + + s.aggrIdx = (s.aggrIdx + 1) % s.aggrSeries.NumFunctions() + return true +} + +// Convert raw chunks to a fixed-interval aggregate +func (s *V3ioSeriesSet) chunks2IntervalAggregates() { + + iter := s.currSeries.Iterator() + if iter.Next() { + + s.baseTime = s.mint + + for { + t, v := iter.At() + s.aggrSet.AppendAllCells(int((t-s.baseTime)/s.interval), v) + if !iter.Next() { + break + } + } + } + + if iter.Err() != nil { + s.err = iter.Err() + return + } +} + +// Convert chunks to an overlapping-windows aggregate +func (s *V3ioSeriesSet) chunks2WindowedAggregates() { + + maxAligned := (s.maxt / s.interval) * s.interval + //baseTime := maxAligned - int64(s.overlapWin[0])*s.interval + + iter := s.currSeries.Iterator() + + if iter.Seek(s.baseTime) { + + if iter.Err() != nil { + s.err = iter.Err() + return + } + + s.baseTime = maxAligned + + for { + t, v := iter.At() + if t < maxAligned { + for i, win := range s.overlapWin { + if t > maxAligned-int64(win)*s.interval { + s.aggrSet.AppendAllCells(i, v) + } + } + } + if !iter.Next() { + s.err = iter.Err() + break + } + } + } +} + +// Return the current error +func (s *V3ioSeriesSet) Err() error { + if s.iter.Err() != nil { + return s.iter.Err() + } + return s.err +} + +// Return a series iterator +func (s *V3ioSeriesSet) At() utils.Series { + if s.aggrSeries == nil { + return s.currSeries + } + + return NewAggrSeries(s, s.aggrSeries.GetFunctions()[s.aggrIdx]) +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/delete_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/delete_integration_test.go new file mode 100644 index 00000000..e3ee3fe7 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/delete_integration_test.go @@ -0,0 +1,1141 @@ +// +build integration + +package tsdb_test + +import ( + "fmt" + "math" + "path" + "strconv" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + v3io "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + . 
"github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +func timeStringToMillis(timeStr string) int64 { + ta, _ := time.Parse(time.RFC3339, timeStr) + return ta.Unix() * 1000 +} +func TestDeleteTable(t *testing.T) { + ta, _ := time.Parse(time.RFC3339, "2018-10-03T05:00:00Z") + t1 := ta.Unix() * 1000 + tb, _ := time.Parse(time.RFC3339, "2018-10-07T05:00:00Z") + t2 := tb.Unix() * 1000 + tc, _ := time.Parse(time.RFC3339, "2018-10-11T05:00:00Z") + t3 := tc.Unix() * 1000 + td, _ := time.Parse(time.RFC3339, "2022-10-11T05:00:00Z") + futurePoint := td.Unix() * 1000 + + defaultTimeMillis := timeStringToMillis("2019-07-21T00:00:00Z") + generalData := []tsdbtest.DataPoint{ + // partition 1 + // chunk a + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + // chunk b + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + // partition 2 + // chunk a + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + // chunk b + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + // partition 3 + // chunk a + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + // chunk b + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}} + partitions1StartTime := timeStringToMillis("2019-07-21T00:00:00Z") + partitions2StartTime := timeStringToMillis("2019-07-23T00:00:00Z") + partitions3StartTime := timeStringToMillis("2019-07-25T00:00:00Z") + + testCases := []struct { + desc string + deleteParams DeleteParams + data tsdbtest.TimeSeries + expectedData map[string][]tsdbtest.DataPoint + expectedPartitions []int64 + ignoreReason string + }{ + {desc: "Should delete all table by time", + deleteParams: DeleteParams{ + From: 0, + To: 9999999999999, + IgnoreErrors: true, + }, + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, + {Time: t2, Value: 333.3}, + {Time: t3, Value: 444.4}, + {Time: futurePoint, Value: 555.5}}, + }}, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": {}}, + }, + {desc: "Should delete all table by deleteAll", + deleteParams: DeleteParams{ + From: 0, + To: t1, + DeleteAll: true, + IgnoreErrors: true, + }, + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, + {Time: t2, Value: 333.3}, + {Time: t3, Value: 444.4}, + {Time: futurePoint, Value: 555.5}}, + }}, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": {}}, + }, + {desc: "Should delete whole partitions", + 
data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions2StartTime - 1, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": {{Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole partitions with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions2StartTime - 1, + Filter: "os == 'win'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-win": {{Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 
5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole partitions specific metrics", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions2StartTime - 1, + Metrics: []string{"cpu"}, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": {{Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, 
Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole partitions specific metrics with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions2StartTime - 1, + Metrics: []string{"cpu"}, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": {{Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, 
Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole chunks", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + tsdbtest.HoursInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole chunks with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + tsdbtest.HoursInMillis, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 
2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole chunks specific metrics", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + tsdbtest.HoursInMillis, + Metrics: []string{"cpu"}, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 
4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole chunks specific metrics with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + tsdbtest.HoursInMillis, + Metrics: []string{"cpu"}, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 
4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + + { + desc: "Should delete partial chunk in the start", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + 4*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{ + "cpu": { + {Time: 
defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partial chunk in the middle", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + 3*tsdbtest.MinuteInMillis, + To: partitions1StartTime + 7*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{ + "cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partial chunk in the end", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + 6*tsdbtest.MinuteInMillis, + To: partitions1StartTime + 11*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{ + 
"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partial chunk with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + 6*tsdbtest.MinuteInMillis, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 
1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + }, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partial chunk specific metrics", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + 6*tsdbtest.MinuteInMillis, + Metrics: []string{"cpu"}, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 
2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partial chunk specific metrics with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + 6*tsdbtest.MinuteInMillis, + Metrics: []string{"cpu"}, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: 
defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete mixed partitions and chunks", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + tsdbtest.HoursInMillis, + To: partitions3StartTime + 6*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions3StartTime}, + }, + { + desc: "Should delete mixed partitions and chunks with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + tsdbtest.HoursInMillis, + To: partitions3StartTime + 6*tsdbtest.MinuteInMillis, + Filter: "os == 
'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + }, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete mixed partitions and chunks specific metrics", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + tsdbtest.HoursInMillis, + To: partitions3StartTime + 6*tsdbtest.MinuteInMillis, + Metrics: []string{"cpu"}, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: 
defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete mixed partitions and chunks specific metrics with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + tsdbtest.HoursInMillis, + To: partitions3StartTime + 6*tsdbtest.MinuteInMillis, + Metrics: []string{"cpu"}, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: 
defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partially last chunk and update max time", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions3StartTime + 1*tsdbtest.HoursInMillis + 6*tsdbtest.MinuteInMillis, + To: partitions3StartTime + 1*tsdbtest.HoursInMillis + 11*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole last chunk and update max time", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, 
+ deleteParams: DeleteParams{ + From: partitions3StartTime + 1*tsdbtest.HoursInMillis, + To: partitions3StartTime + 2*tsdbtest.HoursInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete all samples in a chunk even when the time range is not bigger than the chunk", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + 1*tsdbtest.HoursInMillis + 2*tsdbtest.MinuteInMillis, + To: partitions1StartTime + 2*tsdbtest.HoursInMillis + 11*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + } + + for _, test := range testCases { + t.Run(test.desc, func(t *testing.T) { + if test.ignoreReason != "" { + t.Skip(test.ignoreReason) + } + testDeleteTSDBCase(t, + tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptDropTableOnTearDown, + Value: !test.deleteParams.DeleteAll}, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: 
test.data}, + ), test.deleteParams, test.expectedData, test.expectedPartitions) + }) + } +} + +func getCurrentPartitions(test *testing.T, container v3io.Container, path string) []int64 { + input := &v3io.GetItemInput{Path: path + "/.schema", + AttributeNames: []string{"*"}} + res, err := container.GetItemSync(input) + if err != nil { + test.Fatal(errors.Wrap(err, "failed to get schema")) + } + output := res.Output.(*v3io.GetItemOutput) + var partitions []int64 + for part := range output.Item { + partitionsStartTime, _ := strconv.ParseInt(part[1:], 10, 64) // parse attribute and discard attribute prefix + partitions = append(partitions, partitionsStartTime) + } + return partitions +} + +func testDeleteTSDBCase(test *testing.T, testParams tsdbtest.TestParams, deleteParams DeleteParams, + expectedData map[string][]tsdbtest.DataPoint, expectedPartitions []int64) { + + adapter, teardown := tsdbtest.SetUpWithData(test, testParams) + defer teardown() + + container, err := utils.CreateContainer(adapter.GetLogger("container"), testParams.V3ioConfig(), adapter.HTTPTimeout) + if err != nil { + test.Fatalf("failed to create new container. reason: %s", err) + } + + if err := adapter.DeleteDB(deleteParams); err != nil { + test.Fatalf("Failed to delete DB. reason: %s", err) + } + + if !deleteParams.DeleteAll { + actualPartitions := getCurrentPartitions(test, container, testParams.V3ioConfig().TablePath) + assert.ElementsMatch(test, expectedPartitions, actualPartitions, "remaining partitions are not as expected") + + qry, err := adapter.QuerierV2() + if err != nil { + test.Fatalf("Failed to create Querier. reason: %v", err) + } + + params := &pquerier.SelectParams{ + From: 0, + To: math.MaxInt64, + Filter: "1==1", + } + set, err := qry.Select(params) + if err != nil { + test.Fatalf("Failed to run Select. reason: %v", err) + } + + for set.Next() { + series := set.At() + labels := series.Labels() + osLabel := labels.Get("os") + metricName := labels.Get(config.PrometheusMetricNameAttribute) + iter := series.Iterator() + if iter.Err() != nil { + test.Fatalf("Failed to query data series. reason: %v", iter.Err()) + } + + actual, err := iteratorToSlice(iter) + if err != nil { + test.Fatal(err) + } + expectedDataKey := metricName + if osLabel != "" { + expectedDataKey = fmt.Sprintf("%v-%v", expectedDataKey, osLabel) + } + + assert.ElementsMatch(test, expectedData[expectedDataKey], actual, + "result data for '%v' didn't match, expected: %v\n actual: %v\n", expectedDataKey, expectedData[expectedDataKey], actual) + + } + if set.Err() != nil { + test.Fatalf("Failed to query metric. 
reason: %v", set.Err()) + } + } else { + container, tablePath := adapter.GetContainer() + tableSchemaPath := path.Join(tablePath, config.SchemaConfigFileName) + + // Validate: schema does not exist + _, err := container.GetObjectSync(&v3io.GetObjectInput{Path: tableSchemaPath}) + if err != nil { + if utils.IsNotExistsError(err) { + // OK - expected + } else { + test.Fatalf("Failed to read a TSDB schema from '%s'.\nError: %v", tableSchemaPath, err) + } + } + + // Validate: table does not exist + _, err = container.GetObjectSync(&v3io.GetObjectInput{Path: tablePath}) + if err != nil { + if utils.IsNotExistsError(err) { + // OK - expected + } else { + test.Fatalf("Failed to read a TSDB schema from '%s'.\nError: %v", tablePath, err) + } + } + } +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/schema/schema.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/schema/schema.go new file mode 100644 index 00000000..822d7224 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/schema/schema.go @@ -0,0 +1,197 @@ +package schema + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +const ( + Version = 4 + MaxV3ioArraySize = 130000 +) + +func NewSchema(v3ioCfg *config.V3ioConfig, samplesIngestionRate, aggregationGranularity, aggregatesList string, crossLabelSets string) (*config.Schema, error) { + return newSchema( + samplesIngestionRate, + aggregationGranularity, + aggregatesList, + crossLabelSets, + v3ioCfg.MinimumChunkSize, + v3ioCfg.MaximumChunkSize, + v3ioCfg.MaximumSampleSize, + v3ioCfg.MaximumPartitionSize, + config.DefaultSampleRetentionTime, + v3ioCfg.ShardingBucketsCount) +} + +func newSchema(samplesIngestionRate, aggregationGranularity, aggregatesList string, crossLabelSets string, minChunkSize, maxChunkSize, maxSampleSize, maxPartitionSize, sampleRetention, shardingBucketsCount int) (*config.Schema, error) { + rateInHours, err := rateToHours(samplesIngestionRate) + if err != nil { + return nil, errors.Wrapf(err, "Invalid samples ingestion rate (%s).", samplesIngestionRate) + } + + chunkInterval, partitionInterval, err := calculatePartitionAndChunkInterval(rateInHours, minChunkSize, maxChunkSize, maxSampleSize, maxPartitionSize) + if err != nil { + return nil, errors.Wrap(err, "Failed to calculate the chunk interval.") + } + + aggregates, err := aggregate.RawAggregatesToStringList(aggregatesList) + if err != nil { + return nil, errors.Wrapf(err, "Failed to parse aggregates list '%s'.", aggregatesList) + } + + if err := validateAggregatesGranularity(aggregationGranularity, partitionInterval, len(aggregates) > 0); err != nil { + return nil, err + } + + parsedCrossLabelSets := aggregate.ParseCrossLabelSets(crossLabelSets) + + if len(parsedCrossLabelSets) > 0 && len(aggregates) == 0 { + return nil, errors.New("Cross label aggregations must be used in conjunction with aggregations") + } + + if len(aggregates) == 0 { + aggregates = strings.Split(config.DefaultAggregates, ",") + } + + defaultRollup := config.Rollup{ + Aggregates: aggregates, + AggregationGranularity: aggregationGranularity, + StorageClass: config.DefaultStorageClass, + SampleRetention: sampleRetention, //TODO: make configurable + LayerRetentionTime: config.DefaultLayerRetentionTime, + } + + var preaggregates []config.PreAggregate + for _, labelSet := range parsedCrossLabelSets { + preaggregate := 
config.PreAggregate{ + Labels: labelSet, + Granularity: aggregationGranularity, + Aggregates: aggregates, + } + preaggregates = append(preaggregates, preaggregate) + } + + tableSchema := config.TableSchema{ + Version: Version, + RollupLayers: []config.Rollup{defaultRollup}, + ShardingBucketsCount: shardingBucketsCount, + PartitionerInterval: partitionInterval, + ChunckerInterval: chunkInterval, + PreAggregates: preaggregates, + } + + fields, err := aggregate.SchemaFieldFromString(aggregates, "v") + if err != nil { + return nil, errors.Wrapf(err, "Failed to create an aggregates list from string '%s'.", aggregates) + } + fields = append(fields, config.SchemaField{Name: "_name", Type: "string", Nullable: false, Items: ""}) + + partitionSchema := config.PartitionSchema{ + Version: tableSchema.Version, + Aggregates: aggregates, + AggregationGranularity: aggregationGranularity, + StorageClass: config.DefaultStorageClass, + SampleRetention: config.DefaultSampleRetentionTime, + ChunckerInterval: tableSchema.ChunckerInterval, + PartitionerInterval: tableSchema.PartitionerInterval, + } + + schema := &config.Schema{ + TableSchemaInfo: tableSchema, + PartitionSchemaInfo: partitionSchema, + Partitions: []*config.Partition{}, + Fields: fields, + } + + return schema, nil +} + +func calculatePartitionAndChunkInterval(rateInHours, minChunkSize, maxChunkSize, maxSampleSize, maxPartitionSize int) (string, string, error) { + maxNumberOfEventsPerChunk := maxChunkSize / maxSampleSize + minNumberOfEventsPerChunk := minChunkSize / maxSampleSize + + chunkInterval := maxNumberOfEventsPerChunk / rateInHours + if chunkInterval == 0 { + return "", "", fmt.Errorf("the samples ingestion rate (%v/h) is too high", rateInHours) + } + + // Make sure the expected chunk size is greater than the supported minimum. + if chunkInterval < minNumberOfEventsPerChunk/rateInHours { + return "", "", fmt.Errorf( + "the calculated chunk size is smaller than the minimum: samples ingestion rate = %v/h, calculated chunk interval = %v, minimum size = %v", + rateInHours, chunkInterval, minChunkSize) + } + + actualCapacityOfChunk := chunkInterval * rateInHours * maxSampleSize + numberOfChunksInPartition := 0 + + for (numberOfChunksInPartition+24)*actualCapacityOfChunk < maxPartitionSize { + numberOfChunksInPartition += 24 + } + if numberOfChunksInPartition == 0 { + return "", "", errors.Errorf("the samples ingestion rate (%v/h) is too high - cannot fit a partition in a day interval with the calculated chunk size (%v)", rateInHours, chunkInterval) + } + + partitionInterval := numberOfChunksInPartition * chunkInterval + return strconv.Itoa(chunkInterval) + "h", strconv.Itoa(partitionInterval) + "h", nil +} + +func rateToHours(samplesIngestionRate string) (int, error) { + parsingError := errors.New(`Invalid samples ingestion rate. The rate must be of the format "[0-9]+/[smh]". For example, "12/m"`) + + if len(samplesIngestionRate) < 3 { + return 0, parsingError + } + if samplesIngestionRate[len(samplesIngestionRate)-2] != '/' { + return 0, parsingError + } + + last := samplesIngestionRate[len(samplesIngestionRate)-1] + // Get the ingestion-rate samples number, ignoring the slash and time unit + samplesIngestionRate = samplesIngestionRate[:len(samplesIngestionRate)-2] + i, err := strconv.Atoi(samplesIngestionRate) + if err != nil { + return 0, errors.Wrap(err, parsingError.Error()) + } + if i <= 0 { + return 0, fmt.Errorf("invalid samples ingestion rate (%s). 
The rate cannot have a negative number of samples", samplesIngestionRate) + } + switch last { + case 's': + return i * 60 * 60, nil + case 'm': + return i * 60, nil + case 'h': + return i, nil + default: + return 0, parsingError + } +} + +func validateAggregatesGranularity(aggregationGranularity string, partitionInterval string, hasAggregates bool) error { + dayMillis := 24 * int64(time.Hour/time.Millisecond) + duration, err := utils.Str2duration(aggregationGranularity) + if err != nil { + return errors.Wrapf(err, "Failed to parse aggregation granularity '%s'.", aggregationGranularity) + } + + if dayMillis%duration != 0 && duration%dayMillis != 0 { + return errors.New("the aggregation granularity should be a divisor or a dividend of 1 day. Examples: \"10m\"; \"30m\"; \"2h\"") + } + + if hasAggregates { + partitionIntervalDuration, _ := utils.Str2duration(partitionInterval) // safe to ignore error since we create 'partitionInterval' + if partitionIntervalDuration/duration > MaxV3ioArraySize { + return errors.New("the size of the aggregation-granularity interval isn't sufficiently larger than the specified ingestion rate. Try increasing the granularity to get the expected pre-aggregation performance impact") + } + } + return nil +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/schema/schema_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/schema/schema_test.go new file mode 100644 index 00000000..9817fd6f --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/schema/schema_test.go @@ -0,0 +1,64 @@ +// +build unit + +package schema + +import ( + "fmt" + "testing" +) + +func TestRateToHour(t *testing.T) { + cases := []struct { + input string + output int + shouldFail bool + }{ + {input: "1/s", output: 3600}, + {input: "12/m", output: 12 * 60}, + {input: "2/h", output: 2}, + {input: "1m", shouldFail: true}, + {input: "1/t", shouldFail: true}, + {input: "-431/t", shouldFail: true}, + {input: "-1", shouldFail: true}, + {input: "", shouldFail: true}, + } + + for _, testCase := range cases { + t.Run(testCase.input, func(t *testing.T) { + actual, err := rateToHours(testCase.input) + if err != nil && !testCase.shouldFail { + t.Fatalf("got unexpected error %v", err) + } else if actual != testCase.output { + t.Fatalf("actual %v is not equal to expected %v", actual, testCase.output) + } + }) + } +} + +func TestAggregationGranularityValidation(t *testing.T) { + cases := []struct { + granularity string + partitionInterval string + hasAggregates bool + shouldFail bool + }{ + {granularity: "1h", partitionInterval: "48h", hasAggregates: true, shouldFail: false}, + {granularity: "15m", partitionInterval: "2880h", hasAggregates: true, shouldFail: false}, + {granularity: "1h", partitionInterval: "150000h", hasAggregates: true, shouldFail: true}, + {granularity: "1h", partitionInterval: "150000h", hasAggregates: false, shouldFail: false}, + {granularity: "30m", partitionInterval: "75000h", hasAggregates: true, shouldFail: true}, + } + + for _, testCase := range cases { + testName := fmt.Sprintf("%v - %v - %v", + testCase.granularity, testCase.partitionInterval, testCase.hasAggregates) + t.Run(testName, func(t *testing.T) { + err := validateAggregatesGranularity(testCase.granularity, testCase.partitionInterval, testCase.hasAggregates) + + if err != nil && !testCase.shouldFail || + err == nil && testCase.shouldFail { + t.Fatalf("test shouldFail=%v, and got error: %v", testCase.shouldFail, err) + } + }) + } +} diff --git 
a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdb_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdb_integration_test.go new file mode 100644 index 00000000..012478a1 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdb_integration_test.go @@ -0,0 +1,115 @@ +// +build integration + +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package tsdb_test + +import ( + "math" + "testing" + "time" + + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testTsdbSuite struct { + suite.Suite +} + +func (suite *testTsdbSuite) TestAppend() { + testCtx := suite.T() + testParams := tsdbtest.NewTestParams(testCtx) + defer tsdbtest.SetUp(testCtx, testParams)() + + adapter, err := tsdb.NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + suite.Require().NoError(err) + + appender, err := adapter.Appender() + suite.Require().NoError(err) + + querier, err := adapter.Querier(nil, 0, math.MaxInt64) + suite.Require().NoError(err) + + t1 := suite.parseTime("2018-11-01T00:00:00Z") + t2 := suite.parseTime("2018-11-03T00:00:00Z") + + _, err = appender.Add( + utils.Labels{utils.Label{Name: "__name__", Value: "AAPL"}, utils.Label{Name: "market", Value: "usa"}}, + t1, + -91.0) + suite.Require().NoError(err) + _, err = appender.Add( + utils.Labels{utils.Label{Name: "__name__", Value: "AAL"}, utils.Label{Name: "market", Value: "usa"}}, + t1, + -87.0) + suite.Require().NoError(err) + _, err = appender.Add( + utils.Labels{utils.Label{Name: "__name__", Value: "AAP"}, utils.Label{Name: "market", Value: "usa"}}, + t2, + -50.0) + suite.Require().NoError(err) + + _, err = appender.WaitForCompletion(0) + suite.Require().NoError(err) + + set, err := querier.Select("", "min", int64(time.Hour/time.Millisecond), "1==1") + suite.Require().NoError(err) + + // TODO: Replace map[tv]struct{} with []tv once TSDB-37 is fixed. This open issue causes duplicate results. 
+ var result = make(map[string]map[tv]struct{}) + for set.Next() { + suite.Require().Nil(set.Err()) + key := set.At().Labels().String() + var samples = make(map[tv]struct{}) + iter := set.At().Iterator() + for iter.Next() { + t, v := iter.At() + samples[tv{t: t, v: v}] = struct{}{} + } + result[key] = samples + } + + expected := map[string]map[tv]struct{}{ + `{__name__="AAPL", market="usa", Aggregate="min"}`: {tv{t: t1, v: -91}: struct{}{}}, + `{__name__="AAL", market="usa", Aggregate="min"}`: {tv{t: t1, v: -87}: struct{}{}}, + `{__name__="AAP", market="usa", Aggregate="min"}`: {tv{t: t2, v: -50}: struct{}{}}, + } + + suite.Require().Equal(expected, result) +} + +func (suite *testTsdbSuite) parseTime(timestamp string) int64 { + t, err := time.Parse(time.RFC3339, timestamp) + suite.Require().NoError(err) + return t.Unix() * 1000 +} + +type tv struct { + t int64 + v float64 +} + +func TestTsdbSuite(t *testing.T) { + suite.Run(t, new(testTsdbSuite)) +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/config.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/config.go new file mode 100644 index 00000000..58b259c8 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/config.go @@ -0,0 +1,65 @@ +package tsdbtest + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/pkg/errors" + "github.com/v3io/v3io-tsdb/pkg/config" +) + +const TsdbDefaultTestConfigPath = "testdata" + +// nolint: deadcode,varcheck +const relativeProjectPath = "src/github.com/v3io/v3io-tsdb" + +/* +This method will try and load the configuration file from several locations by the following order: +1. Environment variable named 'V3IO_TSDB_CONFIG' +2. Current package's 'testdata/v3io-tsdb-config.yaml' folder +3. $GOPATH/src/github.com/v3io/v3io-tsdb/v3io-tsdb-config.yaml +*/ +func GetV3ioConfigPath() (string, error) { + if configurationPath := os.Getenv(config.V3ioConfigEnvironmentVariable); configurationPath != "" { + return configurationPath, nil + } + + localConfigFile := filepath.Join(TsdbDefaultTestConfigPath, config.DefaultConfigurationFileName) + if _, err := os.Stat(localConfigFile); !os.IsNotExist(err) { + return localConfigFile, nil + } + + // Look for a parent directory containing a makefile and the configuration file (presumed to be the project root). + dirPath := "./" + for { + _, err := os.Stat(dirPath + "Makefile") + if err == nil { + confFilePath := dirPath + config.DefaultConfigurationFileName + _, err = os.Stat(confFilePath) + if err == nil { + return confFilePath, nil + } + break // Bail out if we found the makefile but the config is not there. + } + absolute, err := filepath.Abs(dirPath) + if err != nil || absolute == "/" { // Bail out if we reached the root. 
+ break + } + dirPath += "../" + } + + return "", errors.Errorf("config file is not specified and could not be found") +} + +func LoadV3ioConfig() (*config.V3ioConfig, error) { + path, err := GetV3ioConfigPath() + if err != nil { + return nil, err + } + v3ioConfig, err := config.GetOrLoadFromFile(path) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("unable to load test configuration from '%s'", path)) + } + return v3ioConfig, nil +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/config_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/config_test.go new file mode 100644 index 00000000..5a8817bf --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/config_test.go @@ -0,0 +1,193 @@ +// +build unit + +package tsdbtest + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/v3io/v3io-tsdb/pkg/config" +) + +func createTestConfig(t *testing.T, path string) { + fullPath := filepath.Join(path, config.DefaultConfigurationFileName) + _, err := os.Create(fullPath) + if err != nil { + t.Fatalf("Failed to create file at %s. Error: %v", fullPath, err) + } + t.Logf("---> Created test configuration at: %s", fullPath) +} + +func deleteTestConfig(t *testing.T, path string) { + fullPath := filepath.Join(path, config.DefaultConfigurationFileName) + err := os.Remove(fullPath) + if err != nil && !os.IsNotExist(err) { + t.Errorf("Failed to remove file at %s. Error: %v", fullPath, err) + } + t.Logf("<--- Removed test configuration from: %s", fullPath) +} + +func TestGetV3ioConfigPath(t *testing.T) { + projectHome := "../../.." + testCases := []struct { + description string + expectedPath string + setup func() func() + }{ + {description: "get config from package testdata", + expectedPath: filepath.Join(TsdbDefaultTestConfigPath, config.DefaultConfigurationFileName), + setup: func() func() { + // Make this test agnostic to environment variables at runtime (store & recover on exit) + configPathEnv := os.Getenv(config.V3ioConfigEnvironmentVariable) + os.Unsetenv(config.V3ioConfigEnvironmentVariable) + + if _, err := os.Stat(filepath.Join(TsdbDefaultTestConfigPath, config.DefaultConfigurationFileName)); !os.IsNotExist(err) { + return func() { + os.Setenv(config.V3ioConfigEnvironmentVariable, configPathEnv) + } + } else { + path := TsdbDefaultTestConfigPath + if err := os.Mkdir(path, 0777); err != nil && !os.IsExist(err) { + t.Fatalf("Failed to mkdir %v", err) + } + createTestConfig(t, path) + return func() { + os.Setenv(config.V3ioConfigEnvironmentVariable, configPathEnv) + deleteTestConfig(t, path) + os.RemoveAll(path) + } + } + }}, + + {description: "get config from project root", + expectedPath: "./../../../v3io-tsdb-config.yaml", + setup: func() func() { + // Make this test agnostic to environment variables at runtime (store & recover on exit) + configPathEnv := os.Getenv(config.V3ioConfigEnvironmentVariable) + os.Unsetenv(config.V3ioConfigEnvironmentVariable) + + if _, err := os.Stat(filepath.Join(projectHome, config.DefaultConfigurationFileName)); !os.IsNotExist(err) { + return func() { + os.Setenv(config.V3ioConfigEnvironmentVariable, configPathEnv) + } + } else { + path := projectHome + createTestConfig(t, path) + return func() { + os.Setenv(config.V3ioConfigEnvironmentVariable, configPathEnv) + deleteTestConfig(t, path) + os.Remove(path) + } + } + }}, + + {description: "get config from env var", + expectedPath: getConfigPathFromEnvOrDefault(), + setup: 
func() func() { + env := os.Getenv(config.V3ioConfigEnvironmentVariable) + if env == "" { + os.Setenv(config.V3ioConfigEnvironmentVariable, config.DefaultConfigurationFileName) + return func() { + os.Unsetenv(config.V3ioConfigEnvironmentVariable) + } + } + return func() {} + }}, + } + + for _, test := range testCases { + t.Run(test.description, func(t *testing.T) { + testGetV3ioConfigPathCase(t, test.expectedPath, test.setup) + }) + } +} + +func getConfigPathFromEnvOrDefault() string { + configPath := os.Getenv(config.V3ioConfigEnvironmentVariable) + if configPath == "" { + configPath = config.DefaultConfigurationFileName + } + return configPath +} + +func testGetV3ioConfigPathCase(t *testing.T, expected string, setup func() func()) { + defer setup()() + path, err := GetV3ioConfigPath() + if err != nil { + t.Fatal("Failed to get configuration path", err) + } + assert.Equal(t, expected, path) +} + +func TestMergeConfig(t *testing.T) { + defaultCfg, err := config.GetOrDefaultConfig() + if err != nil { + t.Fatal("Failed to get default configuration", err) + } + + updateWithCfg := config.V3ioConfig{ + BatchSize: 128, + TablePath: "test-new-table", + MetricsReporter: config.MetricsReporterConfig{ + ReportOnShutdown: true, + RepotInterval: 120, + }, + } + + mergedCfg, err := defaultCfg.Merge(&updateWithCfg) + if err != nil { + t.Fatal("Failed to update default configuration", err) + } + + // Validate result structure + assert.Equal(t, mergedCfg.BatchSize, 128) + assert.Equal(t, mergedCfg.TablePath, "test-new-table") + assert.Equal(t, mergedCfg.MetricsReporter.ReportOnShutdown, true) + assert.Equal(t, mergedCfg.MetricsReporter.RepotInterval, 120) + + // Make sure that default configuration remains unchanged + snapshot, err := config.GetOrDefaultConfig() + if err != nil { + t.Fatal("Failed to get default configuration", err) + } + + assert.Equal(t, snapshot.BatchSize, defaultCfg.BatchSize) + assert.Equal(t, snapshot.TablePath, defaultCfg.TablePath) + assert.Equal(t, snapshot.MetricsReporter.ReportOnShutdown, defaultCfg.MetricsReporter.ReportOnShutdown) + assert.Equal(t, snapshot.MetricsReporter.RepotInterval, defaultCfg.MetricsReporter.RepotInterval) + + assert.NotNil(t, defaultCfg.BuildInfo) + assert.NotNil(t, defaultCfg.BuildInfo.String()) +} + +func TestWithDefaults(t *testing.T) { + myCfg := &config.V3ioConfig{ + BatchSize: 1024, + TablePath: "test-my-table", + MetricsReporter: config.MetricsReporterConfig{ + ReportOnShutdown: true, + RepotInterval: 180, + ReportPeriodically: true, + }, + } + + updatedCfg := config.WithDefaults(myCfg) + + // Make sure it didn't override anything + assert.Equal(t, updatedCfg.BatchSize, myCfg.BatchSize) + assert.Equal(t, updatedCfg.TablePath, myCfg.TablePath) + assert.Equal(t, updatedCfg.MetricsReporter.ReportPeriodically, myCfg.MetricsReporter.ReportPeriodically) + assert.Equal(t, updatedCfg.MetricsReporter.RepotInterval, myCfg.MetricsReporter.RepotInterval) + assert.Equal(t, updatedCfg.MetricsReporter.ReportOnShutdown, myCfg.MetricsReporter.ReportOnShutdown) + + // and default value is set for ShardingBucketsCount + assert.Equal(t, updatedCfg.ShardingBucketsCount, config.DefaultShardingBucketsCount) + + // WithDefaults method does not create new configuration struct, therefore result object has the same address as myCfg + assert.Equal(t, myCfg, updatedCfg) + + assert.NotNil(t, updatedCfg.BuildInfo) + assert.NotNil(t, updatedCfg.BuildInfo.String()) +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/testutils/schema.go 
b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/testutils/schema.go new file mode 100644 index 00000000..bae13b4b --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/testutils/schema.go @@ -0,0 +1,21 @@ +package testutils + +import ( + "testing" + + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/tsdb/schema" +) + +func CreateSchema(t testing.TB, aggregates string) *config.Schema { + v3ioCfg, err := config.GetOrDefaultConfig() + if err != nil { + t.Fatalf("Failed to obtain a TSDB configuration. Error: %v", err) + } + + schm, err := schema.NewSchema(v3ioCfg, "1/s", "1h", aggregates, "") + if err != nil { + t.Fatalf("Failed to create a TSDB schema. Error: %v", err) + } + return schm +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go new file mode 100644 index 00000000..879b8cf7 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go @@ -0,0 +1,406 @@ +package tsdbtest + +import ( + json2 "encoding/json" + "fmt" + "os" + "path" + "regexp" + "strings" + "testing" + "time" + + "github.com/v3io/v3io-tsdb/internal/pkg/performance" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + // nolint: golint + . "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/testutils" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +const MinuteInMillis = 60 * 1000 +const HoursInMillis = 60 * MinuteInMillis +const DaysInMillis = 24 * HoursInMillis + +type DataPoint struct { + Time int64 + Value interface{} +} + +func (dp DataPoint) Equals(other DataPoint) bool { + if &dp.Time != &other.Time { + return true + } + if dp.Time != other.Time { + return false + } + + switch dpVal := dp.Value.(type) { + case float64: + switch oVal := other.Value.(type) { + case float64: + return dpVal == oVal + case int: + return dpVal == float64(oVal) + default: + return false + } + case int: + switch oVal := other.Value.(type) { + case float64: + return float64(dpVal) == oVal + case int: + return dpVal == oVal + default: + return false + } + case string: + switch oVal := other.Value.(type) { + case string: + return oVal == dpVal + case float64: + soVal := fmt.Sprintf("%f", oVal) + return dpVal == soVal + case int: + soVal := fmt.Sprintf("%d", oVal) + return dpVal == soVal + default: + return false + } + default: + return false + } +} + +type Metric struct { + Name string + Labels utils.Labels + Data []DataPoint + ExpectedCount *int +} +type TimeSeries []Metric + +const OptDropTableOnTearDown = "DropTableOnTearDown" +const OptIgnoreReason = "IgnoreReason" +const OptTimeSeries = "TimeSeries" +const OptV3ioConfig = "V3ioConfig" + +type TestParams map[string]interface{} +type TestOption struct { + Key string + Value interface{} +} + +func NewTestParams(t testing.TB, opts ...TestOption) TestParams { + initialSize := len(opts) + testOpts := make(TestParams, initialSize) + + // Initialize defaults + testOpts[OptDropTableOnTearDown] = true + testOpts[OptIgnoreReason] = "" + testOpts[OptTimeSeries] = TimeSeries{} + + defaultV3ioConfig, err := LoadV3ioConfig() + if err != nil { + t.Fatalf("Unable to get V3IO configuration.\nError: %v", err) + } + + //defaultV3ioConfig.TablePath = PrefixTablePath(t.Name()) + testOpts[OptV3ioConfig] = defaultV3ioConfig + + for _, opt := range opts { + 
testOpts[opt.Key] = opt.Value + } + + return testOpts +} + +func (tp TestParams) TimeSeries() TimeSeries { + return tp[OptTimeSeries].(TimeSeries) +} +func (tp TestParams) DropTableOnTearDown() bool { + return tp[OptDropTableOnTearDown].(bool) +} +func (tp TestParams) IgnoreReason() string { + return tp[OptIgnoreReason].(string) +} +func (tp TestParams) V3ioConfig() *config.V3ioConfig { + return tp[OptV3ioConfig].(*config.V3ioConfig) +} + +// DataPointTimeSorter sorts DataPoints by time +type DataPointTimeSorter []DataPoint + +func (a DataPointTimeSorter) Len() int { return len(a) } +func (a DataPointTimeSorter) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a DataPointTimeSorter) Less(i, j int) bool { return a[i].Time < a[j].Time } + +type Sample struct { + Lset utils.Labels + Time string + Value float64 +} + +func DeleteTSDB(t testing.TB, v3ioConfig *config.V3ioConfig) { + adapter, err := NewV3ioAdapter(v3ioConfig, nil, nil) + if err != nil { + t.Fatalf("Failed to create an adapter. Reason: %s", err) + } + + if err := adapter.DeleteDB(DeleteParams{DeleteAll: true, IgnoreErrors: true}); err != nil { + t.Fatalf("Failed to delete a TSDB instance (table) on teardown. Reason: %s", err) + } +} + +func CreateTestTSDB(t testing.TB, v3ioConfig *config.V3ioConfig) { + CreateTestTSDBWithAggregates(t, v3ioConfig, "*") +} + +func CreateTestTSDBWithAggregates(t testing.TB, v3ioConfig *config.V3ioConfig, aggregates string) { + schema := testutils.CreateSchema(t, aggregates) + if err := CreateTSDB(v3ioConfig, schema, nil); err != nil { + v3ioConfigAsJSON, _ := json2.MarshalIndent(v3ioConfig, "", " ") + t.Fatalf("Failed to create a TSDB instance (table). Reason: %v\nConfiguration:\n%s", err, string(v3ioConfigAsJSON)) + } +} + +func tearDown(t testing.TB, v3ioConfig *config.V3ioConfig, testParams TestParams) { + // Don't delete the TSDB table if the test failed or test expects that + if !t.Failed() && testParams.DropTableOnTearDown() { + DeleteTSDB(t, v3ioConfig) + } +} + +func SetUp(t testing.TB, testParams TestParams) func() { + v3ioConfig := testParams.V3ioConfig() + + if overrideTableName, ok := testParams["override_test_name"]; ok { + v3ioConfig.TablePath = PrefixTablePath(fmt.Sprintf("%v", overrideTableName)) + } else { + v3ioConfig.TablePath = PrefixTablePath(fmt.Sprintf("%s-%d", t.Name(), time.Now().Nanosecond())) + } + + CreateTestTSDB(t, v3ioConfig) + + // Measure performance + metricReporter, err := performance.DefaultReporterInstance() + if err != nil { + t.Fatalf("Unable to initialize the performance metrics reporter. Reason: %v", err) + } + // nolint: errcheck + metricReporter.Start() + + return func() { + // nolint: errcheck + defer metricReporter.Stop() + tearDown(t, v3ioConfig, testParams) + } +} + +func SetUpWithData(t *testing.T, testOpts TestParams) (*V3ioAdapter, func()) { + teardown := SetUp(t, testOpts) + adapter := InsertData(t, testOpts) + return adapter, teardown +} + +func SetUpWithDBConfig(t *testing.T, schema *config.Schema, testParams TestParams) func() { + v3ioConfig := testParams.V3ioConfig() + v3ioConfig.TablePath = PrefixTablePath(fmt.Sprintf("%s-%d", t.Name(), time.Now().Nanosecond())) + if err := CreateTSDB(v3ioConfig, schema, nil); err != nil { + v3ioConfigAsJSON, _ := json2.MarshalIndent(v3ioConfig, "", " ") + t.Fatalf("Failed to create a TSDB instance (table). 
Reason: %s\nConfiguration:\n%s", err, string(v3ioConfigAsJSON)) + } + + // Measure performance + metricReporter, err := performance.DefaultReporterInstance() + if err != nil { + t.Fatalf("Unable to initialize the performance metrics reporter. Error: %v", err) + } + // nolint: errcheck + metricReporter.Start() + + return func() { + // nolint: errcheck + defer metricReporter.Stop() + tearDown(t, v3ioConfig, testParams) + } +} + +func InsertData(t *testing.T, testParams TestParams) *V3ioAdapter { + adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + if err != nil { + t.Fatalf("Failed to create a V3IO TSDB adapter. Reason: %s", err) + } + + appender, err := adapter.Appender() + if err != nil { + t.Fatalf("Failed to get an appender. Reason: %s", err) + } + + timeSeries := testParams.TimeSeries() + + for _, metric := range timeSeries { + + labels := utils.Labels{utils.Label{Name: "__name__", Value: metric.Name}} + labels = append(labels, metric.Labels...) + + ref, err := appender.Add(labels, metric.Data[0].Time, metric.Data[0].Value) + if err != nil { + t.Fatalf("Failed to add data to the TSDB appender. Reason: %s", err) + } + for _, curr := range metric.Data[1:] { + err := appender.AddFast(labels, ref, curr.Time, curr.Value) + if err != nil { + t.Fatalf("Failed to AddFast. Reason: %s", err) + } + } + + if _, err := appender.WaitForCompletion(0); err != nil { + t.Fatalf("Failed to wait for TSDB append completion. Reason: %s", err) + } + } + + return adapter +} + +func ValidateCountOfSamples(t testing.TB, adapter *V3ioAdapter, metricName string, expected int, startTimeMs, endTimeMs int64, queryAggStep int64) { + + var stepSize int64 + if queryAggStep <= 0 { + var err error + stepSize, err = utils.Str2duration("1h") + if err != nil { + t.Fatal(err, "Failed to create an aggregation interval (step).") + } + } else { + stepSize = queryAggStep + } + + qry, err := adapter.QuerierV2() + if err != nil { + t.Fatal(err, "Failed to create a Querier instance.") + } + + selectParams := &pquerier.SelectParams{From: startTimeMs - stepSize, + To: endTimeMs, + Functions: "count", + Step: stepSize, + Filter: fmt.Sprintf("starts(__name__, '%v')", metricName)} + set, _ := qry.Select(selectParams) + + var actualCount int + for set.Next() { + if set.Err() != nil { + t.Fatal(set.Err(), "Failed to get the next element from the result set.") + } + + series := set.At() + iter := series.Iterator() + for iter.Next() { + if iter.Err() != nil { + t.Fatal(set.Err(), "Failed to get the next time-value pair from the iterator.") + } + _, v := iter.At() + actualCount += int(v) + } + } + + if set.Err() != nil { + t.Fatal(set.Err()) + } + + if expected != actualCount { + t.Fatalf("Check failed: the metric samples' actual count isn't as expected [%d(actualCount) != %d(expected)].", actualCount, expected) + } + + t.Logf("PASS: the metric-samples actual count matches the expected total count [%d(actualCount) == %d(expected)].", actualCount, expected) +} + +func ValidateRawData(t testing.TB, adapter *V3ioAdapter, metricName string, startTimeMs, endTimeMs int64, isValid func(*DataPoint, *DataPoint) bool) { + + qry, err := adapter.Querier(nil, startTimeMs, endTimeMs) + if err != nil { + t.Fatal(err, "Failed to create a Querier instance.") + } + + set, _ := qry.Select(metricName, "", 0, "") + + for set.Next() { + // Start over for each label set + var lastDataPoint *DataPoint + + if set.Err() != nil { + t.Fatal(set.Err(), "Failed to get the next element from a result set.") + } + + series := set.At() + iter := 
series.Iterator() + for iter.Next() { + if iter.Err() != nil { + t.Fatal(set.Err(), "Failed to get the next time-value pair from an iterator.") + } + currentTime, currentValue := iter.At() + currentDataPoint := &DataPoint{Time: currentTime, Value: currentValue} + + if lastDataPoint != nil { + switch dataType := lastDataPoint.Value.(type) { + case string, float64, int, int64: + // Note: We cast float to integer to eliminate the risk of a precision error + if !isValid(lastDataPoint, currentDataPoint) { + t.Fatalf("The raw-data consistency check failed: metric name='%s'\n\tisValid(%v, %v) == false", + metricName, lastDataPoint, currentDataPoint) + } + default: + t.Fatalf("Got value of unsupported data type: %T", dataType) + } + } + lastDataPoint = currentDataPoint + } + } + + if set.Err() != nil { + t.Fatal(set.Err()) + } +} + +func NormalizePath(path string) string { + chars := []string{":", "+"} + r := strings.Join(chars, "") + re := regexp.MustCompile("[" + r + "]+") + return re.ReplaceAllString(path, "_") +} + +func PrefixTablePath(tablePath string) string { + base := os.Getenv("TSDB_TEST_TABLE_PATH") + if base == "" { + return tablePath + } + return path.Join(os.Getenv("TSDB_TEST_TABLE_PATH"), tablePath) +} + +func IteratorToSlice(it chunkenc.Iterator) ([]DataPoint, error) { + var result []DataPoint + for it.Next() { + t, v := it.At() + if it.Err() != nil { + return nil, it.Err() + } + result = append(result, DataPoint{Time: t, Value: v}) + } + return result, nil +} + +func NanosToMillis(nanos int64) int64 { + millis := nanos / int64(time.Millisecond) + return millis +} + +func DateStringToMillis(date string) (int64, error) { + t, err := time.Parse(time.RFC3339, date) + if err != nil { + return 0, err + } + return t.Unix() * 1000, nil +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go new file mode 100644 index 00000000..ed5a9ee3 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go @@ -0,0 +1,845 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. 
+*/ + +package tsdb + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "math" + pathUtil "path" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/nuclio/logger" + "github.com/pkg/errors" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-go/pkg/dataplane/http" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/appender" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/partmgr" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/querier" + "github.com/v3io/v3io-tsdb/pkg/tsdb/schema" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +const ( + defaultHTTPTimeout = 30 * time.Second + + errorCodeString = "ErrorCode" + falseConditionOuterErrorCode = "184549378" // todo: change codes + falseConditionInnerErrorCode = "385876025" + maxExpressionsInUpdateItem = 1500 // max is 2000, we're taking a buffer since it doesn't work with 2000 +) + +type V3ioAdapter struct { + startTimeMargin int64 + logger logger.Logger + container v3io.Container + HTTPTimeout time.Duration + MetricsCache *appender.MetricsCache + cfg *config.V3ioConfig + partitionMngr *partmgr.PartitionManager +} + +type DeleteParams struct { + Metrics []string + Filter string + From, To int64 + DeleteAll bool + + IgnoreErrors bool +} + +func CreateTSDB(cfg *config.V3ioConfig, schema *config.Schema, container v3io.Container) error { + + lgr, _ := utils.NewLogger(cfg.LogLevel) + httpTimeout := parseHTTPTimeout(cfg, lgr) + var err error + if container == nil { + container, err = utils.CreateContainer(lgr, cfg, httpTimeout) + if err != nil { + return errors.Wrap(err, "Failed to create a data container.") + } + } + data, err := json.Marshal(schema) + if err != nil { + return errors.Wrap(err, "Failed to marshal the TSDB schema file.") + } + + dataPlaneInput := v3io.DataPlaneInput{Timeout: httpTimeout} + + path := pathUtil.Join(cfg.TablePath, config.SchemaConfigFileName) + // Check whether the config file already exists, and abort if it does + _, err = container.GetObjectSync(&v3io.GetObjectInput{Path: path, DataPlaneInput: dataPlaneInput}) + if err == nil { + return fmt.Errorf("A TSDB table already exists at path '" + cfg.TablePath + "'.") + } + + err = container.PutObjectSync(&v3io.PutObjectInput{Path: path, Body: data, DataPlaneInput: dataPlaneInput}) + if err != nil { + return errors.Wrapf(err, "Failed to create a TSDB schema at path '%s/%s/%s'.", cfg.WebAPIEndpoint, cfg.Container, path) + } + return err +} + +func parseHTTPTimeout(cfg *config.V3ioConfig, logger logger.Logger) time.Duration { + if cfg.HTTPTimeout == "" { + return defaultHTTPTimeout + } + timeout, err := time.ParseDuration(cfg.HTTPTimeout) + if err != nil { + logger.Warn("Failed to parse httpTimeout '%s'. Defaulting to %d millis.", cfg.HTTPTimeout, defaultHTTPTimeout/time.Millisecond) + return defaultHTTPTimeout + } + return timeout +} + +// Create a new TSDB adapter, similar to Prometheus TSDB adapter but with a few +// extensions. The Prometheus compliant adapter is found under /promtsdb. 
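+// Editorial note (illustrative sketch only, not part of the upstream source): a minimal
+// way this adapter API could be exercised, assuming a configuration file loadable via
+// config.GetOrLoadFromFile and letting the adapter create its own container and logger:
+//
+//	cfg, err := config.GetOrLoadFromFile("v3io-tsdb-config.yaml")
+//	if err != nil { /* handle error */ }
+//	adapter, err := NewV3ioAdapter(cfg, nil, nil) // nil container/logger are created from cfg
+//	if err != nil { /* handle error */ }
+//	appender, _ := adapter.Appender()
+//	labels := utils.Labels{utils.Label{Name: "__name__", Value: "cpu"}}
+//	now := time.Now().Unix() * 1000
+//	ref, _ := appender.Add(labels, now, 3.1)
+//	_ = appender.AddFast(labels, ref, now+1000, 3.2)
+//	_, _ = appender.WaitForCompletion(0)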
+func NewV3ioAdapter(cfg *config.V3ioConfig, container v3io.Container, logger logger.Logger) (*V3ioAdapter, error) { + + var err error + newV3ioAdapter := V3ioAdapter{} + newV3ioAdapter.cfg = cfg + if logger != nil { + newV3ioAdapter.logger = logger + } else { + newV3ioAdapter.logger, err = utils.NewLogger(cfg.LogLevel) + if err != nil { + return nil, err + } + } + + newV3ioAdapter.HTTPTimeout = parseHTTPTimeout(cfg, logger) + + if container != nil { + newV3ioAdapter.container = container + } else { + newV3ioAdapter.container, err = utils.CreateContainer(newV3ioAdapter.logger, cfg, newV3ioAdapter.HTTPTimeout) + if err != nil { + return nil, errors.Wrap(err, "Failed to create V3IO data container") + } + } + + err = newV3ioAdapter.connect() + + return &newV3ioAdapter, err +} + +func NewContainer(v3ioURL string, numWorkers int, accessKey string, username string, password string, containerName string, logger logger.Logger) (v3io.Container, error) { + newContextInput := &v3iohttp.NewContextInput{ + NumWorkers: numWorkers, + } + ctx, err := v3iohttp.NewContext(logger, newContextInput) + if err != nil { + return nil, err + } + + session, err := ctx.NewSession(&v3io.NewSessionInput{URL: v3ioURL, Username: username, Password: password, AccessKey: accessKey}) + if err != nil { + return nil, errors.Wrap(err, "Failed to create session.") + } + + container, err := session.NewContainer(&v3io.NewContainerInput{ContainerName: containerName}) + if err != nil { + return nil, err + } + return container, nil +} + +func (a *V3ioAdapter) GetSchema() *config.Schema { + return a.partitionMngr.GetConfig() +} + +func (a *V3ioAdapter) GetLogger(child string) logger.Logger { + return a.logger.GetChild(child) +} + +func (a *V3ioAdapter) GetContainer() (v3io.Container, string) { + return a.container, a.cfg.TablePath +} + +func (a *V3ioAdapter) connect() error { + + fullpath := fmt.Sprintf("%s/%s/%s", a.cfg.WebAPIEndpoint, a.cfg.Container, a.cfg.TablePath) + resp, err := a.container.GetObjectSync(&v3io.GetObjectInput{Path: pathUtil.Join(a.cfg.TablePath, config.SchemaConfigFileName)}) + if err != nil { + if utils.IsNotExistsError(err) { + return errors.Errorf("No TSDB schema file found at '%s'.", fullpath) + } + return errors.Wrapf(err, "Failed to read a TSDB schema from '%s'.", fullpath) + } + + tableSchema := config.Schema{} + err = json.Unmarshal(resp.Body(), &tableSchema) + if err != nil { + return errors.Wrapf(err, "Failed to unmarshal the TSDB schema at '%s', got: %v .", fullpath, string(resp.Body())) + } + + // in order to support backward compatibility we do not fail on version mismatch and only logging warning + if a.cfg.LoadPartitionsFromSchemaAttr && tableSchema.TableSchemaInfo.Version != schema.Version { + a.logger.Warn("Table Schema version mismatch - existing table schema version is %d while the tsdb library version is %d! 
Make sure to create the table with same library version", + tableSchema.TableSchemaInfo.Version, schema.Version) + } + + a.partitionMngr, err = partmgr.NewPartitionMngr(&tableSchema, a.container, a.cfg) + if err != nil { + return errors.Wrapf(err, "Failed to create a TSDB partition manager at '%s'.", fullpath) + } + err = a.partitionMngr.Init() + if err != nil { + return errors.Wrapf(err, "Failed to initialize the TSDB partition manager at: %s", fullpath) + } + + a.logger.Debug("Running with the following TSDB configuration: %+v\n", a.cfg) + + return nil +} + +func (a *V3ioAdapter) InitAppenderCache() error { + if a.MetricsCache == nil { + a.MetricsCache = appender.NewMetricsCache(a.container, a.logger, a.cfg, a.partitionMngr) + return a.MetricsCache.Start() + } + + return nil +} + +// Create an appender interface, for writing performance +func (a *V3ioAdapter) Appender() (Appender, error) { + err := a.InitAppenderCache() + if err != nil { + return nil, err + } + + newAppender := v3ioAppender{metricsCache: a.MetricsCache} + return newAppender, nil +} + +func (a *V3ioAdapter) StartTime() (int64, error) { + startTime := time.Now().Unix() * 1000 + return startTime - 1000*3600*24*1000, nil // TODO: from config or DB w default +} + +func (a *V3ioAdapter) Close() error { + return nil +} + +// Create a Querier interface, used for time-series queries +func (a *V3ioAdapter) Querier(_ context.Context, mint, maxt int64) (*querier.V3ioQuerier, error) { + if maxt < mint { + return nil, errors.Errorf("End time '%d' is lower than start time '%d'.", maxt, mint) + } + return querier.NewV3ioQuerier(a.container, a.logger, mint, maxt, a.cfg, a.partitionMngr), nil +} + +// Create a Querier interface, used for time-series queries +func (a *V3ioAdapter) QuerierV2() (*pquerier.V3ioQuerier, error) { + return pquerier.NewV3ioQuerier(a.container, a.logger, a.cfg, a.partitionMngr), nil +} + +// Delete by time range can optionally specify metrics and filter by labels +func (a *V3ioAdapter) DeleteDB(deleteParams DeleteParams) error { + if deleteParams.DeleteAll { + // Ignore time boundaries + deleteParams.From = 0 + deleteParams.To = math.MaxInt64 + } else { + if deleteParams.To == 0 { + deleteParams.To = time.Now().Unix() * 1000 + } + } + + // Delete Data + err := a.DeletePartitionsData(&deleteParams) + if err != nil { + return err + } + + // If no data is left, delete Names folder + if len(a.partitionMngr.GetPartitionsPaths()) == 0 { + path := filepath.Join(a.cfg.TablePath, config.NamesDirectory) + "/" // Need a trailing slash + a.logger.Info("Delete metric names at path '%s'.", path) + err := utils.DeleteTable(a.logger, a.container, path, "", a.cfg.QryWorkers) + if err != nil && !deleteParams.IgnoreErrors { + return errors.Wrap(err, "Failed to delete the metric-names table.") + } + // Delete the Directory object + err = a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: path}) + if err != nil && !deleteParams.IgnoreErrors { + if !utils.IsNotExistsError(err) { + return errors.Wrapf(err, "Failed to delete table object '%s'.", path) + } + } + } + + // If need to 'deleteAll', delete schema + TSDB table folder + if deleteParams.DeleteAll { + // Delete Schema file + schemaPath := pathUtil.Join(a.cfg.TablePath, config.SchemaConfigFileName) + a.logger.Info("Delete the TSDB configuration at '%s'.", schemaPath) + err := a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: schemaPath}) + if err != nil && !deleteParams.IgnoreErrors { + return errors.New("The configuration at '" + schemaPath + "' cannot be deleted 
or doesn't exist.") + } + + // Delete the Directory object + path := a.cfg.TablePath + "/" + err = a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: path}) + if err != nil && !deleteParams.IgnoreErrors { + if !utils.IsNotExistsError(err) { + return errors.Wrapf(err, "Failed to delete table object '%s'.", path) + } + } + } + + return nil +} + +func (a *V3ioAdapter) DeletePartitionsData(deleteParams *DeleteParams) error { + partitions := a.partitionMngr.PartsForRange(deleteParams.From, deleteParams.To, true) + var entirelyDeletedPartitions []*partmgr.DBPartition + + deleteWholePartition := deleteParams.DeleteAll || (deleteParams.Filter == "" && len(deleteParams.Metrics) == 0) + + fileToDeleteChan := make(chan v3io.Item, 1024) + getItemsTerminationChan := make(chan error, len(partitions)) + deleteTerminationChan := make(chan error, a.cfg.Workers) + numOfGetItemsRoutines := len(partitions) + if len(deleteParams.Metrics) > 0 { + numOfGetItemsRoutines = numOfGetItemsRoutines * len(deleteParams.Metrics) + } + goRoutinesNum := numOfGetItemsRoutines + a.cfg.Workers + onErrorTerminationChannel := make(chan struct{}, goRoutinesNum) + systemAttributesToFetch := []string{config.ObjectNameAttrName, config.MtimeSecsAttributeName, config.MtimeNSecsAttributeName, config.EncodingAttrName, config.MaxTimeAttrName} + var getItemsWorkers, getItemsTerminated, deletesTerminated int + + var getItemsWG sync.WaitGroup + getItemsErrorChan := make(chan error, numOfGetItemsRoutines) + + aggregates := a.GetSchema().PartitionSchemaInfo.Aggregates + hasServerSideAggregations := len(aggregates) != 1 || aggregates[0] != "" + + var aggrMask aggregate.AggrType + var err error + if hasServerSideAggregations { + aggrMask, _, err = aggregate.AggregatesFromStringListWithCount(aggregates) + if err != nil { + return err + } + } + + for i := 0; i < a.cfg.Workers; i++ { + go deleteObjectWorker(a.container, deleteParams, a.logger, + fileToDeleteChan, deleteTerminationChan, onErrorTerminationChannel, + aggrMask) + } + + for _, part := range partitions { + partitionEntirelyInRange := deleteParams.From <= part.GetStartTime() && deleteParams.To >= part.GetEndTime() + deleteEntirePartitionFolder := partitionEntirelyInRange && deleteWholePartition + + // Delete all files in partition folder and then delete the folder itself + if deleteEntirePartitionFolder { + a.logger.Info("Deleting entire partition '%s'.", part.GetTablePath()) + + getItemsWG.Add(1) + go deleteEntirePartition(a.logger, a.container, part.GetTablePath(), a.cfg.QryWorkers, + &getItemsWG, getItemsErrorChan) + + entirelyDeletedPartitions = append(entirelyDeletedPartitions, part) + // First get all items based on filter+metric+time range then delete what is necessary + } else { + a.logger.Info("Deleting partial partition '%s'.", part.GetTablePath()) + + start, end := deleteParams.From, deleteParams.To + + // Round the start and end times to the nearest aggregation buckets - to later on recalculate server side aggregations + if hasServerSideAggregations { + start = part.GetAggregationBucketStartTime(part.Time2Bucket(deleteParams.From)) + end = part.GetAggregationBucketEndTime(part.Time2Bucket(deleteParams.To)) + } + + var chunkAttributesToFetch []string + + // If we don't want to delete the entire object, fetch also the desired chunks to delete. + if !partitionEntirelyInRange { + chunkAttributesToFetch, _ = part.Range2Attrs("v", start, end) + } + + allAttributes := append(chunkAttributesToFetch, systemAttributesToFetch...) 
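+ // Fan out one getItems worker per partition (or one per metric sharding key when specific
+ // metrics were requested); matching items are streamed into fileToDeleteChan and consumed
+ // by the deleteObjectWorker goroutines started above.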
+ if len(deleteParams.Metrics) == 0 { + getItemsWorkers++ + input := &v3io.GetItemsInput{Path: part.GetTablePath(), + AttributeNames: allAttributes, + Filter: deleteParams.Filter} + go getItemsWorker(a.logger, a.container, input, part, fileToDeleteChan, getItemsTerminationChan, onErrorTerminationChannel) + } else { + for _, metric := range deleteParams.Metrics { + for _, shardingKey := range part.GetShardingKeys(metric) { + getItemsWorkers++ + input := &v3io.GetItemsInput{Path: part.GetTablePath(), + AttributeNames: allAttributes, + Filter: deleteParams.Filter, + ShardingKey: shardingKey} + go getItemsWorker(a.logger, a.container, input, part, fileToDeleteChan, getItemsTerminationChan, onErrorTerminationChannel) + } + } + } + } + } + a.logger.Debug("issued %v getItems", getItemsWorkers) + + // Waiting fot deleting of full partitions + getItemsWG.Wait() + select { + case err = <-getItemsErrorChan: + // Signal all other goroutines to quite + for i := 0; i < goRoutinesNum; i++ { + onErrorTerminationChannel <- struct{}{} + } + return err + default: + } + + if getItemsWorkers != 0 { + for deletesTerminated < a.cfg.Workers { + select { + case err := <-getItemsTerminationChan: + a.logger.Debug("finished getItems worker, total finished: %v, error: %v", getItemsTerminated+1, err) + if err != nil { + // If requested to ignore non-existing tables do not return error. + if !(deleteParams.IgnoreErrors && utils.IsNotExistsOrConflictError(err)) { + for i := 0; i < goRoutinesNum; i++ { + onErrorTerminationChannel <- struct{}{} + } + return errors.Wrapf(err, "GetItems failed during recursive delete.") + } + } + getItemsTerminated++ + + if getItemsTerminated == getItemsWorkers { + close(fileToDeleteChan) + } + case err := <-deleteTerminationChan: + a.logger.Debug("finished delete worker, total finished: %v, err: %v", deletesTerminated+1, err) + if err != nil { + for i := 0; i < goRoutinesNum; i++ { + onErrorTerminationChannel <- struct{}{} + } + return errors.Wrapf(err, "Delete failed during recursive delete.") + } + deletesTerminated++ + } + } + } else { + close(fileToDeleteChan) + } + + a.logger.Debug("finished deleting data, removing partitions from schema") + err = a.partitionMngr.DeletePartitionsFromSchema(entirelyDeletedPartitions) + if err != nil { + return err + } + + return nil +} + +func deleteEntirePartition(logger logger.Logger, container v3io.Container, partitionPath string, workers int, + wg *sync.WaitGroup, errChannel chan<- error) { + defer wg.Done() + + err := utils.DeleteTable(logger, container, partitionPath, "", workers) + if err != nil { + errChannel <- errors.Wrapf(err, "Failed to delete partition '%s'.", partitionPath) + return + } + // Delete the Directory object + err = container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: partitionPath}) + if err != nil && !utils.IsNotExistsError(err) { + errChannel <- errors.Wrapf(err, "Failed to delete partition folder '%s'.", partitionPath) + } +} + +func getItemsWorker(logger logger.Logger, container v3io.Container, input *v3io.GetItemsInput, partition *partmgr.DBPartition, + filesToDeleteChan chan<- v3io.Item, terminationChan chan<- error, onErrorTerminationChannel <-chan struct{}) { + for { + select { + case _ = <-onErrorTerminationChannel: + terminationChan <- nil + return + default: + } + + logger.Debug("going to getItems for partition '%v', input: %v", partition.GetTablePath(), *input) + resp, err := container.GetItemsSync(input) + if err != nil { + terminationChan <- err + return + } + resp.Release() + output := 
resp.Output.(*v3io.GetItemsOutput) + + for _, item := range output.Items { + item["partition"] = partition + + // In case we got error on delete while iterating getItems response + select { + case _ = <-onErrorTerminationChannel: + terminationChan <- nil + return + default: + } + + filesToDeleteChan <- item + } + if output.Last { + terminationChan <- nil + return + } + input.Marker = output.NextMarker + } +} + +func deleteObjectWorker(container v3io.Container, deleteParams *DeleteParams, logger logger.Logger, + filesToDeleteChannel <-chan v3io.Item, terminationChan chan<- error, onErrorTerminationChannel <-chan struct{}, + aggrMask aggregate.AggrType) { + for { + select { + case _ = <-onErrorTerminationChannel: + return + case itemToDelete, ok := <-filesToDeleteChannel: + if !ok { + terminationChan <- nil + return + } + + currentPartition := itemToDelete.GetField("partition").(*partmgr.DBPartition) + fileName, err := itemToDelete.GetFieldString(config.ObjectNameAttrName) + if err != nil { + terminationChan <- err + return + } + fullFileName := pathUtil.Join(currentPartition.GetTablePath(), fileName) + + // Delete whole object + if deleteParams.From <= currentPartition.GetStartTime() && + deleteParams.To >= currentPartition.GetEndTime() { + + logger.Debug("delete entire item '%v' ", fullFileName) + input := &v3io.DeleteObjectInput{Path: fullFileName} + err = container.DeleteObjectSync(input) + if err != nil && !utils.IsNotExistsOrConflictError(err) { + terminationChan <- err + return + } + // Delete partial object - specific chunks or sub-parts of chunks + } else { + mtimeSecs, err := itemToDelete.GetFieldInt(config.MtimeSecsAttributeName) + if err != nil { + terminationChan <- err + return + } + mtimeNSecs, err := itemToDelete.GetFieldInt(config.MtimeNSecsAttributeName) + if err != nil { + terminationChan <- err + return + } + + deleteUpdateExpression := strings.Builder{} + dataEncoding, err := getEncoding(itemToDelete) + if err != nil { + terminationChan <- err + return + } + + var aggregationsByBucket map[int]*aggregate.AggregatesList + if aggrMask != 0 { + aggregationsByBucket = make(map[int]*aggregate.AggregatesList) + aggrBuckets := currentPartition.Times2BucketRange(deleteParams.From, deleteParams.To) + for _, bucketID := range aggrBuckets { + aggregationsByBucket[bucketID] = aggregate.NewAggregatesList(aggrMask) + } + } + + var newMaxTime int64 = math.MaxInt64 + var numberOfExpressionsInUpdate int + for attributeName, value := range itemToDelete { + if strings.HasPrefix(attributeName, "_v") { + // Check whether the whole chunk attribute needed to be deleted or just part of it. + if currentPartition.IsChunkInRangeByAttr(attributeName, deleteParams.From, deleteParams.To) { + deleteUpdateExpression.WriteString("delete(") + deleteUpdateExpression.WriteString(attributeName) + deleteUpdateExpression.WriteString(");") + } else { + currentChunksMaxTime, err := generatePartialChunkDeleteExpression(logger, &deleteUpdateExpression, attributeName, + value.([]byte), dataEncoding, deleteParams, currentPartition, aggregationsByBucket) + if err != nil { + terminationChan <- err + return + } + + // We want to save the earliest max time possible + if currentChunksMaxTime < newMaxTime { + newMaxTime = currentChunksMaxTime + } + } + numberOfExpressionsInUpdate++ + } + } + + dbMaxTime := int64(itemToDelete.GetField(config.MaxTimeAttrName).(int)) + + // Update the partition's max time if needed. 
+ if deleteParams.From < dbMaxTime && deleteParams.To >= dbMaxTime { + if deleteParams.From < newMaxTime { + newMaxTime = deleteParams.From + } + + deleteUpdateExpression.WriteString(fmt.Sprintf("%v=%v;", config.MaxTimeAttrName, newMaxTime)) + } + + if deleteUpdateExpression.Len() > 0 { + // If there are server aggregates, update the needed buckets + if aggrMask != 0 { + for bucket, aggregations := range aggregationsByBucket { + numberOfExpressionsInUpdate = numberOfExpressionsInUpdate + len(*aggregations) + + // Due to engine limitation, If we reached maximum number of expressions in an UpdateItem + // we need to break the update into chunks + // TODO: refactor in 2.8: + // in 2.8 there is a better way of doing it by uniting multiple update expressions into + // one expression by range in a form similar to `_v_sum[15...100]=0` + if numberOfExpressionsInUpdate < maxExpressionsInUpdateItem { + deleteUpdateExpression.WriteString(aggregations.SetExpr("v", bucket)) + } else { + exprStr := deleteUpdateExpression.String() + logger.Debug("delete item '%v' with expression '%v'", fullFileName, exprStr) + mtimeSecs, mtimeNSecs, err = sendUpdateItem(fullFileName, exprStr, mtimeSecs, mtimeNSecs, container) + if err != nil { + terminationChan <- err + return + } + + // Reset stuff for next update iteration + numberOfExpressionsInUpdate = 0 + deleteUpdateExpression.Reset() + } + } + } + + // If any expressions are left, save them + if deleteUpdateExpression.Len() > 0 { + exprStr := deleteUpdateExpression.String() + logger.Debug("delete item '%v' with expression '%v'", fullFileName, exprStr) + _, _, err = sendUpdateItem(fullFileName, exprStr, mtimeSecs, mtimeNSecs, container) + if err != nil { + terminationChan <- err + return + } + } + } + } + } + } +} + +func sendUpdateItem(path, expr string, mtimeSecs, mtimeNSecs int, container v3io.Container) (int, int, error) { + condition := fmt.Sprintf("%v == %v and %v == %v", + config.MtimeSecsAttributeName, mtimeSecs, + config.MtimeNSecsAttributeName, mtimeNSecs) + + input := &v3io.UpdateItemInput{Path: path, + Expression: &expr, + Condition: condition} + + response, err := container.UpdateItemSync(input) + if err != nil && !utils.IsNotExistsOrConflictError(err) { + returnError := err + if isFalseConditionError(err) { + returnError = errors.Wrapf(err, "Item '%v' was updated while deleting occurred. Please disable any ingestion and retry.", path) + } + return 0, 0, returnError + } + + output := response.Output.(*v3io.UpdateItemOutput) + return output.MtimeSecs, output.MtimeNSecs, nil +} + +func getEncoding(itemToDelete v3io.Item) (chunkenc.Encoding, error) { + var encoding chunkenc.Encoding + encodingStr, ok := itemToDelete.GetField(config.EncodingAttrName).(string) + // If we don't have the encoding attribute, use XOR as default. 
(for backwards compatibility) + if !ok { + encoding = chunkenc.EncXOR + } else { + intEncoding, err := strconv.Atoi(encodingStr) + if err != nil { + return 0, fmt.Errorf("error parsing encoding type of chunk, got: %v, error: %v", encodingStr, err) + } + encoding = chunkenc.Encoding(intEncoding) + } + + return encoding, nil +} + +func generatePartialChunkDeleteExpression(logger logger.Logger, expr *strings.Builder, + attributeName string, value []byte, encoding chunkenc.Encoding, deleteParams *DeleteParams, + partition *partmgr.DBPartition, aggregationsByBucket map[int]*aggregate.AggregatesList) (int64, error) { + chunk, err := chunkenc.FromData(logger, encoding, value, 0) + if err != nil { + return 0, err + } + + newChunk := chunkenc.NewChunk(logger, encoding == chunkenc.EncVariant) + appender, err := newChunk.Appender() + if err != nil { + return 0, err + } + + var currentMaxTime int64 + var remainingItemsCount int + iter := chunk.Iterator() + for iter.Next() { + var t int64 + var v interface{} + if encoding == chunkenc.EncXOR { + t, v = iter.At() + } else { + t, v = iter.AtString() + } + + // Append back only events that are not in the delete range + if t < deleteParams.From || t > deleteParams.To { + remainingItemsCount++ + appender.Append(t, v) + + // Calculate server-side aggregations + if aggregationsByBucket != nil { + currentAgg, ok := aggregationsByBucket[partition.Time2Bucket(t)] + // A chunk may contain more data then needed for the aggregations, if this is the case do not aggregate + if ok { + currentAgg.Aggregate(t, v) + } + } + + // Update current chunk's new max time + if t > currentMaxTime { + currentMaxTime = t + } + } + } + + if remainingItemsCount == 0 { + expr.WriteString("delete(") + expr.WriteString(attributeName) + expr.WriteString(");") + currentMaxTime, _ = partition.GetChunkStartTimeByAttr(attributeName) + } else { + bytes := appender.Chunk().Bytes() + val := base64.StdEncoding.EncodeToString(bytes) + + expr.WriteString(fmt.Sprintf("%s=blob('%s'); ", attributeName, val)) + } + + return currentMaxTime, nil + +} + +// Return the number of items in a TSDB table +func (a *V3ioAdapter) CountMetrics(part string) (int, error) { + count := 0 + paths := a.partitionMngr.GetPartitionsPaths() + for _, path := range paths { + input := v3io.GetItemsInput{Path: path, Filter: "", AttributeNames: []string{"__size"}} + iter, err := utils.NewAsyncItemsCursor(a.container, &input, a.cfg.QryWorkers, []string{}, a.logger) + if err != nil { + return 0, err + } + + for iter.Next() { + count++ + } + if iter.Err() != nil { + return count, errors.Wrap(iter.Err(), "Failed on count iterator.") + } + } + + return count, nil +} + +type v3ioAppender struct { + metricsCache *appender.MetricsCache +} + +// Add a t/v value to a metric item and return refID (for AddFast) +func (a v3ioAppender) Add(lset utils.Labels, t int64, v interface{}) (uint64, error) { + return a.metricsCache.Add(lset, t, v) +} + +// Faster Add using refID obtained from Add (avoid some hash/lookup overhead) +func (a v3ioAppender) AddFast(lset utils.Labels, ref uint64, t int64, v interface{}) error { + return a.metricsCache.AddFast(ref, t, v) +} + +// Wait for completion of all updates +func (a v3ioAppender) WaitForCompletion(timeout time.Duration) (int, error) { + return a.metricsCache.WaitForCompletion(timeout) +} + +func (a v3ioAppender) Close() { + a.metricsCache.Close() +} + +// In V3IO, all operations are committed (no client cache) +func (a v3ioAppender) Commit() error { return nil } +func (a v3ioAppender) Rollback() error 
{ return nil } + +// The Appender interface provides batched appends against a storage. +type Appender interface { + Add(l utils.Labels, t int64, v interface{}) (uint64, error) + AddFast(l utils.Labels, ref uint64, t int64, v interface{}) error + WaitForCompletion(timeout time.Duration) (int, error) + Commit() error + Rollback() error + Close() +} + +// Check if the current error was caused specifically because the condition was evaluated to false. +func isFalseConditionError(err error) bool { + errString := err.Error() + + if strings.Count(errString, errorCodeString) == 2 && + strings.Contains(errString, falseConditionOuterErrorCode) && + strings.Contains(errString, falseConditionInnerErrorCode) { + return true + } + + return false +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb_integration_test.go new file mode 100644 index 00000000..af0888ae --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb_integration_test.go @@ -0,0 +1,1133 @@ +// +build integration + +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package tsdb_test + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + . 
"github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/schema" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/testutils" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +const defaultStepMs = 5 * tsdbtest.MinuteInMillis // 5 minutes + +func TestIngestData(t *testing.T) { + timestamp := fmt.Sprintf("%d", time.Now().Unix()) //time.Now().Format(time.RFC3339) + testCases := []struct { + desc string + params tsdbtest.TestParams + }{ + {desc: "Should ingest one data point", + params: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510, Value: 314.3}}, + }}}, + ), + }, + {desc: "Should ingest multiple data points", + params: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510, Value: 314.3}, + {Time: 1532940510 + 5, Value: 300.3}, + {Time: 1532940510 + 10, Value: 3234.6}}, + }}}, + ), + }, + {desc: "Should ingest record with late arrival same chunk", + params: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510, Value: 314.3}, + {Time: 1532940510 + 5, Value: 300.3}, + {Time: 1532940510 - 10, Value: 3234.6}}, + }}}, + ), + }, + {desc: "Should ingest into first partition in epoch without corruption (TSDB-67)", + params: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "coolcpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 10, Value: 314.3}, + }, + }}}, + ), + }, + {desc: "Should drop values of incompatible data types ", + params: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "IG13146", + Labels: utils.LabelsFromStringList("test", "IG-13146", "float", "string"), + Data: []tsdbtest.DataPoint{ + {Time: 15, Value: 0.1}, // first add float value + {Time: 20, Value: "some string value"}, // then attempt to add string value + {Time: 30, Value: 0.2}, // and finally add another float value + }, + ExpectedCount: func() *int { var expectedCount = 2; return &expectedCount }(), + }}}, + tsdbtest.TestOption{ + Key: "override_test_name", + Value: fmt.Sprintf("IG-13146-%s", timestamp)}), + }, + {desc: "IG-13146: Should reject values of incompatible data types without data corruption", + params: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "IG13146", + Labels: utils.LabelsFromStringList("test", "IG-13146", "float", "string"), + Data: []tsdbtest.DataPoint{ + {Time: 50, Value: "another string value"}, // then attempt to add string value + {Time: 60, Value: 0.4}, // valid values from this batch will be dropped + {Time: 70, Value: 0.3}, // because processing of entire batch will stop + }, + ExpectedCount: func() *int { var expectedCount = 1; return &expectedCount }(), + }}}, + tsdbtest.TestOption{ + 
Key: "override_test_name", + Value: fmt.Sprintf("IG-13146-%s", timestamp)}, + tsdbtest.TestOption{ + Key: "expected_error_contains_string", + // Note, the expected error message should align with pkg/appender/ingest.go:308 + Value: "trying to ingest values of incompatible data type"}), + }, + } + + for _, test := range testCases { + t.Run(test.desc, func(t *testing.T) { + if test.params.IgnoreReason() != "" { + t.Skip(test.params.IgnoreReason()) + } + testIngestDataCase(t, test.params) + }) + } +} + +func testIngestDataCase(t *testing.T, testParams tsdbtest.TestParams) { + defer tsdbtest.SetUp(t, testParams)() + + adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + if err != nil { + t.Fatalf("Failed to create v3io adapter. reason: %s", err) + } + + appender, err := adapter.Appender() + if err != nil { + t.Fatalf("Failed to get appender. reason: %s", err) + } + + for _, dp := range testParams.TimeSeries() { + sort.Sort(tsdbtest.DataPointTimeSorter(dp.Data)) + from := dp.Data[0].Time + to := dp.Data[len(dp.Data)-1].Time + + labels := utils.Labels{utils.Label{Name: "__name__", Value: dp.Name}} + labels = append(labels, dp.Labels...) + + ref, err := appender.Add(labels, dp.Data[0].Time, dp.Data[0].Value) + if err != nil { + t.Fatalf("Failed to add data to appender. reason: %s", err) + } + for _, curr := range dp.Data[1:] { + appender.AddFast(labels, ref, curr.Time, curr.Value) + } + + if _, err := appender.WaitForCompletion(0); err != nil { + if !isExpected(testParams, err) { + t.Fatalf("Failed to wait for appender completion. reason: %s", err) + } + } + + expectedCount := len(dp.Data) + if dp.ExpectedCount != nil { + expectedCount = *dp.ExpectedCount + } + tsdbtest.ValidateCountOfSamples(t, adapter, dp.Name, expectedCount, from, to, -1) + } +} + +func isExpected(testParams tsdbtest.TestParams, actualErr error) bool { + if errMsg, ok := testParams["expected_error_contains_string"]; ok { + return strings.Contains(actualErr.Error(), fmt.Sprintf("%v", errMsg)) + } + return false +} + +func TestIngestDataWithSameTimestamp(t *testing.T) { + baseTime := int64(1532209200000) + testParams := tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: []tsdbtest.DataPoint{ + {Time: baseTime, Value: 1}, + {Time: baseTime, Value: 2}}}, + tsdbtest.Metric{ + Name: "cpu1", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: []tsdbtest.DataPoint{ + {Time: baseTime, Value: 2}, {Time: baseTime, Value: 3}}}, + }}) + + defer tsdbtest.SetUp(t, testParams)() + + adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + if err != nil { + t.Fatalf("Failed to create v3io adapter. reason: %s", err) + } + + appender, err := adapter.Appender() + if err != nil { + t.Fatalf("Failed to get appender. reason: %s", err) + } + + for _, dp := range testParams.TimeSeries() { + labels := utils.Labels{utils.Label{Name: "__name__", Value: dp.Name}} + labels = append(labels, dp.Labels...) + + ref, err := appender.Add(labels, dp.Data[0].Time, dp.Data[0].Value) + if err != nil { + t.Fatalf("Failed to add data to appender. reason: %s", err) + } + + for _, curr := range dp.Data[1:] { + appender.AddFast(labels, ref, curr.Time, curr.Value) + } + + if _, err := appender.WaitForCompletion(0); err != nil { + t.Fatalf("Failed to wait for appender completion. 
reason: %s", err) + } + } + + tsdbtest.ValidateCountOfSamples(t, adapter, "", 2, baseTime-1*tsdbtest.HoursInMillis, baseTime+1*tsdbtest.HoursInMillis, -1) +} + +// test for http://jira.iguazeng.com:8080/browse/IG-14978 +func TestIngestWithTimeDeltaBiggerThen32Bit(t *testing.T) { + data := []tsdbtest.DataPoint{ + {Time: 1384786967945, Value: 1.0}, + {Time: 1392818567945, Value: 2.0}} + testParams := tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: data}, + }}) + + schema, err := schema.NewSchema(testParams.V3ioConfig(), "1/h", "1h", "", "") + defer tsdbtest.SetUpWithDBConfig(t, schema, testParams)() + + adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + if err != nil { + t.Fatalf("Failed to create v3io adapter. reason: %s", err) + } + + appender, err := adapter.Appender() + if err != nil { + t.Fatalf("Failed to get appender. reason: %s", err) + } + + for _, dp := range testParams.TimeSeries() { + labels := utils.Labels{utils.Label{Name: "__name__", Value: dp.Name}} + labels = append(labels, dp.Labels...) + + ref, err := appender.Add(labels, dp.Data[0].Time, dp.Data[0].Value) + if err != nil { + t.Fatalf("Failed to add data to appender. reason: %s", err) + } + + for _, curr := range dp.Data[1:] { + appender.AddFast(labels, ref, curr.Time, curr.Value) + } + + if _, err := appender.WaitForCompletion(0); err != nil { + t.Fatalf("Failed to wait for appender completion. reason: %s", err) + } + } + + querier, _ := adapter.QuerierV2() + iter, _ := querier.Select(&pquerier.SelectParams{From: 0, + To: time.Now().Unix() * 1000}) + for iter.Next() { + dataIter := iter.At().Iterator() + actual, err := iteratorToSlice(dataIter) + if err != nil { + t.Fatal(err) + } + + assert.ElementsMatch(t, data, actual, + "result data didn't match. \nExpected: %v\n Actual: %v", data, actual) + } + + if iter.Err() != nil { + t.Fatal(err) + } +} + +func TestIngestVarTypeWithTimeDeltaBiggerThen32Bit(t *testing.T) { + data := []string{"a", "b"} + times := []int64{1384786967945, 1392818567945} + + testParams := tsdbtest.NewTestParams(t) + + schema, err := schema.NewSchema(testParams.V3ioConfig(), "1/h", "1h", "", "") + defer tsdbtest.SetUpWithDBConfig(t, schema, testParams)() + + adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + if err != nil { + t.Fatalf("Failed to create v3io adapter. reason: %s", err) + } + + appender, err := adapter.Appender() + if err != nil { + t.Fatalf("Failed to get appender. reason: %s", err) + } + + labels := utils.Labels{utils.Label{Name: "__name__", Value: "metric_1"}} + for i, v := range data { + _, err := appender.Add(labels, times[i], v) + if err != nil { + t.Fatalf("Failed to add data to appender. reason: %s", err) + } + + } + + if _, err := appender.WaitForCompletion(0); err != nil { + t.Fatalf("Failed to wait for appender completion. 
reason: %s", err) + } + + querier, _ := adapter.QuerierV2() + iter, _ := querier.Select(&pquerier.SelectParams{From: 0, + To: time.Now().Unix() * 1000}) + var seriesCount int + for iter.Next() { + seriesCount++ + iter := iter.At().Iterator() + var i int + for iter.Next() { + time, value := iter.AtString() + assert.Equal(t, times[i], time, "time does not match at index %v", i) + assert.Equal(t, data[i], value, "value does not match at index %v", i) + i++ + } + } + + assert.Equal(t, 1, seriesCount, "series count didn't match expected") + + if iter.Err() != nil { + t.Fatal(err) + } +} + +func TestWriteMetricWithDashInName(t *testing.T) { + testParams := tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu-1", + Labels: utils.LabelsFromStringList("testLabel", "balbala"), + Data: []tsdbtest.DataPoint{{Time: 1532940510, Value: 314.3}}, + }}}) + defer tsdbtest.SetUp(t, testParams)() + + adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + if err != nil { + t.Fatalf("Failed to create v3io adapter. reason: %s", err) + } + + appender, err := adapter.Appender() + if err != nil { + t.Fatalf("Failed to get appender. reason: %s", err) + } + for _, dp := range testParams.TimeSeries() { + labels := utils.Labels{utils.Label{Name: "__name__", Value: dp.Name}} + labels = append(labels, dp.Labels...) + + _, err := appender.Add(labels, dp.Data[0].Time, dp.Data[0].Value) + if err == nil { + t.Fatalf("Test should have failed") + } + } +} + +func TestQueryData(t *testing.T) { + testCases := []struct { + desc string + testParams tsdbtest.TestParams + filter string + aggregates string + from int64 + to int64 + step int64 + expected map[string][]tsdbtest.DataPoint + ignoreReason string + expectFail bool + }{ + {desc: "Should ingest and query one data point", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("testLabel", "balbala"), + Data: []tsdbtest.DataPoint{{Time: 1532940510, Value: 314.3}}, + }}}, + ), + from: 0, + to: 1532940510 + 1, + step: defaultStepMs, + expected: map[string][]tsdbtest.DataPoint{"": {{Time: 1532940510, Value: 314.3}}}}, + + {desc: "Should ingest and query multiple data points", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510 - 10, Value: 314.3}, + {Time: 1532940510 - 5, Value: 300.3}, + {Time: 1532940510, Value: 3234.6}}, + }}}, + ), + from: 0, + to: 1532940510 + 1, + step: defaultStepMs, + expected: map[string][]tsdbtest.DataPoint{"": {{Time: 1532940510 - 10, Value: 314.3}, + {Time: 1532940510 - 5, Value: 300.3}, + {Time: 1532940510, Value: 3234.6}}}}, + + {desc: "Should query with filter on metric name", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{{Time: 1532940510, Value: 33.3}}, + }}}, + ), + filter: "_name=='cpu'", + from: 0, + to: 1532940510 + 1, + step: defaultStepMs, + expected: map[string][]tsdbtest.DataPoint{"": {{Time: 1532940510, Value: 33.3}}}}, + + {desc: "Should query with filter on label name", + 
testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{{Time: 1532940510, Value: 31.3}}, + }}}, + ), + filter: "os=='linux'", + from: 0, + to: 1532940510 + 1, + step: defaultStepMs, + expected: map[string][]tsdbtest.DataPoint{"": {{Time: 1532940510, Value: 31.3}}}}, + + {desc: "Should ingest and query by time", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510, Value: 314.3}, + {Time: 1532940510 + 5, Value: 300.3}, + {Time: 1532940510 + 10, Value: 3234.6}}, + }}}, + ), + from: 1532940510 + 2, + to: 1532940510 + 12, + step: defaultStepMs, + expected: map[string][]tsdbtest.DataPoint{"": {{Time: 1532940510 + 5, Value: 300.3}, + {Time: 1532940510 + 10, Value: 3234.6}}}}, + + {desc: "Should ingest and query by time with no results", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510, Value: 314.3}, + {Time: 1532940510 + 5, Value: 300.3}, + {Time: 1532940510 + 10, Value: 3234.6}}, + }}}, + ), + from: 1532940510 + 1, + to: 1532940510 + 4, + step: defaultStepMs, + expected: map[string][]tsdbtest.DataPoint{}}, + + {desc: "Should ingest and query an aggregate", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510, Value: 300.3}, + {Time: 1532940510 + 5, Value: 300.3}, + {Time: 1532940510 + 10, Value: 100.4}}, + }}}, + ), + from: 1532940510, + to: 1532940510 + 11, + step: defaultStepMs, + aggregates: "sum", + expected: map[string][]tsdbtest.DataPoint{"sum": {{Time: 1532940510, Value: 701.0}}}}, + + {desc: "Should ingest and query an aggregate with interval greater than step size", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510, Value: 300.3}, + {Time: 1532940510 + 60, Value: 300.3}, + {Time: 1532940510 + 2*60, Value: 100.4}, + {Time: 1532940510 + 5*60, Value: 200.0}}, + }}}, + ), + from: 1532940510, + to: 1532940510 + 6*60, + step: defaultStepMs, + aggregates: "sum", + expected: map[string][]tsdbtest.DataPoint{"sum": {{Time: 1532940510, Value: 901.0}}}}, + + {desc: "Should ingest and query multiple aggregates", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510, Value: 300.3}, + {Time: 1532940510 + 5, Value: 300.3}, + {Time: 1532940510 + 10, Value: 100.4}}, + }}}, + ), + from: 1532940510, + to: 1532940510 + 11, + step: defaultStepMs, + aggregates: "sum,count", + expected: 
map[string][]tsdbtest.DataPoint{"sum": {{Time: 1532940510, Value: 701.0}}, + "count": {{Time: 1532940510, Value: 3}}}}, + + {desc: "Should fail on query with illegal time (switch from and to)", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510, Value: 314.3}, + {Time: 1532940510 + 5, Value: 300.3}, + {Time: 1532940510 + 10, Value: 3234.6}}, + }}}, + ), + from: 1532940510 + 1, + to: 0, + step: defaultStepMs, + expectFail: true, + }, + + {desc: "Should query with filter on not existing metric name", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{{Time: 1532940510, Value: 33.3}}, + }}}, + ), + filter: "_name=='hahaha'", + from: 0, + to: 1532940510 + 1, + step: defaultStepMs, + expected: map[string][]tsdbtest.DataPoint{}}, + + {desc: "Should ingest and query aggregates with empty bucket", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1537972278402, Value: 300.3}, + {Time: 1537972278402 + 8*tsdbtest.MinuteInMillis, Value: 300.3}, + {Time: 1537972278402 + 9*tsdbtest.MinuteInMillis, Value: 100.4}}, + }}}, + ), + from: 1537972278402 - 5*tsdbtest.MinuteInMillis, + to: 1537972278402 + 10*tsdbtest.MinuteInMillis, + step: defaultStepMs, + aggregates: "count", + expected: map[string][]tsdbtest.DataPoint{ + "count": {{Time: 1537972278402, Value: 1}, + {Time: 1537972578402, Value: 2}}}}, + + {desc: "Should ingest and query aggregates with few empty buckets in a row", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1537972278402, Value: 300.3}, + {Time: 1537972278402 + 16*tsdbtest.MinuteInMillis, Value: 300.3}, + {Time: 1537972278402 + 17*tsdbtest.MinuteInMillis, Value: 100.4}}, + }}}, + ), + from: 1537972278402 - 5*tsdbtest.MinuteInMillis, + to: 1537972278402 + 18*tsdbtest.MinuteInMillis, + step: defaultStepMs, + aggregates: "count", + expected: map[string][]tsdbtest.DataPoint{ + "count": {{Time: 1537972158402, Value: 1}, + {Time: 1537973058402, Value: 2}}}}, + + {desc: "Should ingest and query server-side aggregates", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510, Value: 300.3}, + {Time: 1532940510 + 5, Value: 300.3}, + {Time: 1532940510 + 10, Value: 100.4}}, + }}}, + ), + from: 1532940510, + to: 1532940510 + 11, + step: 60 * tsdbtest.MinuteInMillis, + aggregates: "sum,count,min,max,sqr,last", + expected: map[string][]tsdbtest.DataPoint{"sum": {{Time: 1532940510, Value: 701.0}}, + "count": {{Time: 1532940510, Value: 3}}, + "min": {{Time: 1532940510, Value: 100.4}}, + "max": {{Time: 1532940510, Value: 
300.3}}, + "sqr": {{Time: 1532940510, Value: 190440.3}}, + "last": {{Time: 1532940510, Value: 100.4}}}}, + } + + for _, test := range testCases { + t.Run(test.desc, func(t *testing.T) { + if test.ignoreReason != "" { + t.Skip(test.ignoreReason) + } + testQueryDataCase(t, test.testParams, test.filter, test.aggregates, test.from, test.to, test.step, test.expected, test.expectFail) + }) + } +} + +func testQueryDataCase(test *testing.T, testParams tsdbtest.TestParams, filter string, queryAggregates string, + from int64, to int64, step int64, expected map[string][]tsdbtest.DataPoint, expectFail bool) { + + adapter, teardown := tsdbtest.SetUpWithData(test, testParams) + defer teardown() + + qry, err := adapter.Querier(nil, from, to) + if err != nil { + if expectFail { + return + } else { + test.Fatalf("Failed to create Querier. reason: %v", err) + } + } + + for _, metric := range testParams.TimeSeries() { + set, err := qry.Select(metric.Name, queryAggregates, step, filter) + if err != nil { + test.Fatalf("Failed to run Select. reason: %v", err) + } + + var counter int + for counter = 0; set.Next(); counter++ { + if set.Err() != nil { + test.Fatalf("Failed to query metric. reason: %v", set.Err()) + } + + series := set.At() + currentAggregate := series.Labels().Get(aggregate.AggregateLabel) + iter := series.Iterator() + if iter.Err() != nil { + test.Fatalf("Failed to query data series. reason: %v", iter.Err()) + } + + actual, err := iteratorToSlice(iter) + if err != nil { + test.Fatal(err) + } + + for _, data := range expected[currentAggregate] { + var equalCount = 0 + for _, dp := range actual { + if dp.Equals(data) { + equalCount++ + continue + } + } + assert.Equal(test, equalCount, len(expected[currentAggregate]), + "Check failed for aggregate='%s'. Query aggregates: %s", currentAggregate, queryAggregates) + } + } + + if set.Err() != nil { + test.Fatalf("Failed to query metric. reason: %v", set.Err()) + } + if counter == 0 && len(expected) > 0 { + test.Fatalf("No data was received") + } + } +} + +func TestQueryDataOverlappingWindow(t *testing.T) { + v3ioConfig, err := config.GetOrDefaultConfig() + if err != nil { + t.Fatalf("unable to load configuration. 
Error: %v", err) + } + + testCases := []struct { + desc string + metricName string + labels []utils.Label + data []tsdbtest.DataPoint + filter string + aggregates string + windows []int + from int64 + to int64 + expected map[string][]tsdbtest.DataPoint + ignoreReason string + }{ + {desc: "Should ingest and query with windowing", + metricName: "cpu", + labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + data: []tsdbtest.DataPoint{{Time: 1532940510, Value: 314.3}, + {Time: 1532944110, Value: 314.3}, + {Time: 1532947710, Value: 300.3}, + {Time: 1532951310, Value: 3234.6}}, + from: 0, to: 1532954910, + windows: []int{1, 2, 4}, + aggregates: "sum", + expected: map[string][]tsdbtest.DataPoint{ + "sum": {{Time: 1532937600, Value: 4163.5}, + {Time: 1532944800, Value: 3534.9}, + {Time: 1532948400, Value: 3234.6}}}, + }, + + {desc: "Should ingest and query with windowing on multiple agg", + metricName: "cpu", + labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + data: []tsdbtest.DataPoint{{Time: 1532940510, Value: 314.3}, + {Time: 1532944110, Value: 314.3}, + {Time: 1532947710, Value: 300.3}, + {Time: 1532951310, Value: 3234.6}}, + from: 0, to: 1532954910, + windows: []int{1, 2, 4}, + aggregates: "sum,count,sqr", + expected: map[string][]tsdbtest.DataPoint{ + "sum": {{Time: 1532937600, Value: 4163.5}, + {Time: 1532944800, Value: 3534.9}, + {Time: 1532948400, Value: 3234.6}}, + "count": {{Time: 1532937600, Value: 4}, + {Time: 1532944800, Value: 2}, + {Time: 1532948400, Value: 1}}, + "sqr": {{Time: 1532937600, Value: 10750386.23}, + {Time: 1532944800, Value: 10552817.25}, + {Time: 1532948400, Value: 10462637.16}}, + }, + }, + } + + for _, test := range testCases { + t.Run(test.desc, func(t *testing.T) { + if test.ignoreReason != "" { + t.Skip(test.ignoreReason) + } + testQueryDataOverlappingWindowCase(t, v3ioConfig, test.metricName, test.labels, + test.data, test.filter, test.windows, test.aggregates, test.from, test.to, test.expected) + }) + } +} + +func testQueryDataOverlappingWindowCase(test *testing.T, v3ioConfig *config.V3ioConfig, + metricsName string, userLabels []utils.Label, data []tsdbtest.DataPoint, filter string, + windows []int, agg string, + from int64, to int64, expected map[string][]tsdbtest.DataPoint) { + + testParams := tsdbtest.NewTestParams(test, + tsdbtest.TestOption{Key: tsdbtest.OptV3ioConfig, Value: v3ioConfig}, + tsdbtest.TestOption{Key: tsdbtest.OptTimeSeries, Value: tsdbtest.TimeSeries{tsdbtest.Metric{Name: metricsName, Data: data, Labels: userLabels}}}, + ) + + adapter, teardown := tsdbtest.SetUpWithData(test, testParams) + defer teardown() + + var step int64 = 3600 + + qry, err := adapter.Querier(nil, from, to) + if err != nil { + test.Fatalf("Failed to create Querier. reason: %v", err) + } + + set, err := qry.SelectOverlap(metricsName, agg, step, windows, filter) + if err != nil { + test.Fatalf("Failed to run Select. reason: %v", err) + } + + var counter int + for counter = 0; set.Next(); counter++ { + if set.Err() != nil { + test.Fatalf("Failed to query metric. reason: %v", set.Err()) + } + + series := set.At() + agg := series.Labels().Get(aggregate.AggregateLabel) + iter := series.Iterator() + if iter.Err() != nil { + test.Fatalf("Failed to query data series. 
reason: %v", iter.Err()) + } + + actual, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + test.Fatal(err) + } + assert.EqualValues(test, len(windows), len(actual)) + for _, data := range expected[agg] { + var equalCount = 0 + for _, dp := range actual { + if dp.Equals(data) { + equalCount++ + continue + } + } + assert.Equal(test, equalCount, len(expected[agg])) + } + } + + if set.Err() != nil { + test.Fatalf("Failed to query metric. reason: %v", set.Err()) + } + if counter == 0 && len(expected) > 0 { + test.Fatalf("No data was received") + } +} + +// Calling Seek instead of next for the first time while iterating over data (TSDB-43) +func TestIgnoreNaNWhenSeekingAggSeries(t *testing.T) { + v3ioConfig, err := tsdbtest.LoadV3ioConfig() + if err != nil { + t.Fatalf("unable to load configuration. Error: %v", err) + } + metricsName := "cpu" + baseTime := int64(1532940510000) + userLabels := utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease") + data := []tsdbtest.DataPoint{{Time: baseTime, Value: 300.3}, + {Time: baseTime + tsdbtest.MinuteInMillis, Value: 300.3}, + {Time: baseTime + 2*tsdbtest.MinuteInMillis, Value: 100.4}, + {Time: baseTime + 5*tsdbtest.MinuteInMillis, Value: 200.0}} + from := int64(baseTime - 60*tsdbtest.MinuteInMillis) + to := int64(baseTime + 6*tsdbtest.MinuteInMillis) + step := int64(2 * tsdbtest.MinuteInMillis) + agg := "avg" + expected := map[string][]tsdbtest.DataPoint{ + "avg": {{baseTime, 300.3}, + {baseTime + step, 100.4}, + {baseTime + 2*step, 200}}} + + testParams := tsdbtest.NewTestParams(t, + tsdbtest.TestOption{Key: tsdbtest.OptV3ioConfig, Value: v3ioConfig}, + tsdbtest.TestOption{Key: tsdbtest.OptTimeSeries, Value: tsdbtest.TimeSeries{tsdbtest.Metric{Name: metricsName, Data: data, Labels: userLabels}}}, + ) + + adapter, teardown := tsdbtest.SetUpWithData(t, testParams) + defer teardown() + + qry, err := adapter.Querier(nil, from, to) + if err != nil { + t.Fatalf("Failed to create Querier. reason: %v", err) + } + + set, err := qry.Select(metricsName, agg, step, "") + if err != nil { + t.Fatalf("Failed to run Select. reason: %v", err) + } + + var counter int + for counter = 0; set.Next(); counter++ { + if set.Err() != nil { + t.Fatalf("Failed to query metric. reason: %v", set.Err()) + } + + series := set.At() + agg := series.Labels().Get(aggregate.AggregateLabel) + iter := series.Iterator() + if iter.Err() != nil { + t.Fatalf("Failed to query data series. reason: %v", iter.Err()) + } + if !iter.Seek(0) { + t.Fatal("Seek time returned false, iterator error:", iter.Err()) + } + var actual []tsdbtest.DataPoint + t0, v0 := iter.At() + if iter.Err() != nil { + t.Fatal("error iterating over series", iter.Err()) + } + actual = append(actual, tsdbtest.DataPoint{Time: t0, Value: v0}) + for iter.Next() { + t1, v1 := iter.At() + + if iter.Err() != nil { + t.Fatal("error iterating over series", iter.Err()) + } + actual = append(actual, tsdbtest.DataPoint{Time: t1, Value: v1}) + } + + for _, data := range expected[agg] { + var equalCount = 0 + for _, dp := range actual { + if dp.Equals(data) { + equalCount++ + continue + } + } + assert.Equal(t, equalCount, len(expected[agg])) + } + } + + if set.Err() != nil { + t.Fatalf("Failed to query metric. 
reason: %v", set.Err()) + } + if counter == 0 && len(expected) > 0 { + t.Fatalf("No data was received") + } +} + +func TestCreateTSDB(t *testing.T) { + testCases := []struct { + desc string + conf *config.Schema + ignoreReason string + }{ + {desc: "Should create TSDB with standard configuration", conf: testutils.CreateSchema(t, "sum,count")}, + + {desc: "Should create TSDB with wildcard aggregations", conf: testutils.CreateSchema(t, "*")}, + } + + testParams := tsdbtest.NewTestParams(t) + + for _, test := range testCases { + t.Run(test.desc, func(t *testing.T) { + if test.ignoreReason != "" { + t.Skip(test.ignoreReason) + } + testCreateTSDBcase(t, test.conf, testParams) + }) + } +} + +func testCreateTSDBcase(t *testing.T, dbConfig *config.Schema, testParams tsdbtest.TestParams) { + defer tsdbtest.SetUpWithDBConfig(t, dbConfig, testParams)() + + adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + if err != nil { + t.Fatalf("Failed to create adapter. reason: %s", err) + } + + actualDbConfig := adapter.GetSchema() + assert.Equal(t, actualDbConfig, dbConfig) +} + +func TestDeleteTSDB(t *testing.T) { + v3ioConfig, err := tsdbtest.LoadV3ioConfig() + if err != nil { + t.Fatalf("unable to load configuration. Error: %v", err) + } + + schema := testutils.CreateSchema(t, "count,sum") + v3ioConfig.TablePath = tsdbtest.PrefixTablePath(t.Name()) + if err := CreateTSDB(v3ioConfig, schema, nil); err != nil { + v3ioConfigAsJson, _ := json.MarshalIndent(v3ioConfig, "", " ") + t.Fatalf("Failed to create TSDB. Reason: %s\nConfiguration:\n%s", err, string(v3ioConfigAsJson)) + } + + adapter, err := NewV3ioAdapter(v3ioConfig, nil, nil) + if err != nil { + t.Fatalf("Failed to create v3io adapter. reason: %s", err) + } + responseChan := make(chan *v3io.Response) + container, _ := adapter.GetContainer() + _, err = container.GetContainerContents(&v3io.GetContainerContentsInput{Path: v3ioConfig.TablePath}, 30, responseChan) + if err != nil { + t.Fatal(err.Error()) + } + if res := <-responseChan; res.Error != nil { + t.Fatal(res.Error.Error()) + } + + if err := adapter.DeleteDB(DeleteParams{DeleteAll: true, IgnoreErrors: true}); err != nil { + t.Fatalf("Failed to delete DB on teardown. reason: %s", err) + } + + _, err = container.GetContainerContents(&v3io.GetContainerContentsInput{Path: v3ioConfig.TablePath}, 30, responseChan) + if err != nil { + t.Fatal(err.Error()) + } + if res := <-responseChan; res.Error == nil { + t.Fatal("Did not delete TSDB properly") + } +} + +func TestIngestDataFloatThenString(t *testing.T) { + testParams := tsdbtest.NewTestParams(t) + + defer tsdbtest.SetUp(t, testParams)() + + adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + if err != nil { + t.Fatalf("Failed to create v3io adapter. reason: %s", err) + } + + appender, err := adapter.Appender() + if err != nil { + t.Fatalf("Failed to get appender. reason: %s", err) + } + + labels := utils.Labels{utils.Label{Name: "__name__", Value: "cpu"}} + _, err = appender.Add(labels, 1532940510000, 12.0) + if err != nil { + t.Fatalf("Failed to add data to appender. reason: %s", err) + } + + _, err = appender.Add(labels, 1532940610000, "tal") + if err == nil { + t.Fatal("expected failure but finished successfully") + } + + if _, err := appender.WaitForCompletion(0); err != nil { + t.Fatalf("Failed to wait for appender completion. 
reason: %s", err) + } + + tsdbtest.ValidateCountOfSamples(t, adapter, "cpu", 1, 0, 1532950510000, -1) +} + +func TestIngestDataStringThenFloat(t *testing.T) { + testParams := tsdbtest.NewTestParams(t) + + defer tsdbtest.SetUp(t, testParams)() + + adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + if err != nil { + t.Fatalf("Failed to create v3io adapter. reason: %s", err) + } + + appender, err := adapter.Appender() + if err != nil { + t.Fatalf("Failed to get appender. reason: %s", err) + } + + labels := utils.Labels{utils.Label{Name: "__name__", Value: "cpu"}} + _, err = appender.Add(labels, 1532940510000, "tal") + if err != nil { + t.Fatalf("Failed to add data to appender. reason: %s", err) + } + + _, err = appender.Add(labels, 1532940610000, 666.0) + if err == nil { + t.Fatal("expected failure but finished successfully") + } + + if _, err := appender.WaitForCompletion(0); err != nil { + t.Fatalf("Failed to wait for appender completion. reason: %s", err) + } + + tsdbtest.ValidateCountOfSamples(t, adapter, "cpu", 1, 0, 1532950510000, -1) +} + +func iteratorToSlice(it chunkenc.Iterator) ([]tsdbtest.DataPoint, error) { + var result []tsdbtest.DataPoint + for it.Next() { + t, v := it.At() + if it.Err() != nil { + return nil, it.Err() + } + result = append(result, tsdbtest.DataPoint{Time: t, Value: v}) + } + return result, nil +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/add.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/add.go new file mode 100644 index 00000000..7b1090e0 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/add.go @@ -0,0 +1,327 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. 
+*/ + +package tsdbctl + +import ( + "encoding/csv" + "fmt" + "io" + "os" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +const ArraySeparator = "," + +type addCommandeer struct { + cmd *cobra.Command + rootCommandeer *RootCommandeer + name string + lset string + tArr string + vArr string + inFile string + stdin bool + delay int +} + +func newAddCommandeer(rootCommandeer *RootCommandeer) *addCommandeer { + commandeer := &addCommandeer{ + rootCommandeer: rootCommandeer, + } + + cmd := &cobra.Command{ + Aliases: []string{"append"}, + Use: "add [] [] [flags]", + Short: "Add metric samples to a TSDB instance", + Long: `Add (ingest) metric samples into a TSDB instance (table).`, + Example: `The examples assume that the endpoint of the web-gateway service, the login credentials, and +the name of the data container are configured in the default configuration file (` + config.DefaultConfigurationFileName + `) +instead of using the -s|--server, -u|--username, -p|--password, and -c|--container flags. +- tsdbctl add temperature -t mytsdb -d 28 -m now-2h +- tsdbctl add http_req method=get -t mytsdb -d 99.9 +- tsdbctl add cpu "host=A,os=win" -t metrics-table -d "73.2,45.1" -m "1533026403000,now-1d" +- tsdbctl add -t perfstats -f ~/tsdb/tsdb_input.csv +- tsdbctl add log -t mytsdb -m now-2h -d "This thing has just happened" + +Notes: +- The command requires a metric name and one or more sample values. + You can provide this information either by using the argument and the -d|--values flag, + or by using the -f|--file flag to point to a CSV file that contains the required information. +- It is possible to ingest metrics containing string values, Though a single metric can contain either Floats or Strings, But not both. + +Arguments: + (string) The name of the metric for which to add samples. + The metric name must be provided either in this argument or in a + CSV file that is specified with the -f|--file flag. + (string) An optional list of labels to add, as a comma-separated list of + "
+| Version | Description |
+| ------- | ----------- |
+| 2.14.0 | Add support to pogs for interface types (#66 and #74) |
+| 2.13.1 | Fix bug with far far pointers (#71), use writev system call to encode multi-segment messages efficiently in Go 1.8+ (#70), and add GitHub-Linguist-compatible code generation comment |
+| 2.13.0 | Add Conn.Done and Conn.Err methods |
+| 2.12.4 | Fix size of created List(Float32) |
+| 2.12.3 | Fix bugs from fuzz tests: mismatched element size on list access causing crashes (#59) and miscellaneous packed reader issues |
+| 2.12.2 | Fix another shutdown race condition (#54) |
+| 2.12.1 | Fix ownership bug with receiver-hosted capabilities, add discriminant check to HasField (#55), fix multi-segment bug for data/text lists, and use nulls for setting empty data/text |
+| 2.12.0 | Add rpc.ConnLog option and fix race conditions and edge cases in RPC implementation |
+| 2.11.1 | Fix packed reader behavior on certain readers (#49), add capnp.UnmarshalPacked function that performs faster, and reduce locking overhead of segment maps |
+| 2.11.0 | Fix shutdown deadlock in RPC shutdown (#45) |
+| 2.10.1 | Work around lack of support for RPC-level promise capabilities (#2) |
+| 2.10.0 | Add pogs package (#33) |
+| 2.9.1 | Fix not-found behavior in schemas and add missing group IDs in generated embedded schemas |
+| 2.9.0 | Add encoding/text package (#20) |
+| 2.8.0 | Reduce generated code size for text fields and correct NUL check |
+| 2.7.0 | Insert compressed schema data into generated code |
+| 2.6.1 | Strip NUL byte from TextList.BytesAt and fix capnpc-go output for struct groups |
+| 2.6.0 | Add packages for predefined Cap'n Proto schemas |
+| 2.5.1 | Fix capnpc-go regression (#29) and strip trailing NUL byte in TextBytes accessor |
+| 2.5.0 | Add NewFoo method for list fields in generated structs (#7) |
+| 2.4.0 | Add maximum segment limit (#25) |
+| 2.3.0 | Add depth and traversal limit security checks |
+| 2.2.1 | Fix data race in reading Message from multiple goroutines |
+| 2.2.0 | Add HasFoo pointer field methods to generated code (#24) |
+| 2.1.0 | Introduce Ptr type and reduce allocations in single-segment cases |
+| 2.0.2 | Allow allocation-less string field access via TextList.BytesAt() and StringBytes() (#17) |
+| 2.0.1 | Allow nil params in client wrappers (#9) and fix integer underflow on compare function (#12) |
+| 2.0.0 | First release under zombiezen.com/go/capnproto2 |
diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/CONTRIBUTORS b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/CONTRIBUTORS new file mode 100644 index 00000000..bcdd5743 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/CONTRIBUTORS @@ -0,0 +1,33 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the go-capnproto repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# copyright belongs to the individual or the corporation. + +# Names should be added to this file like so: +# Individual's name +# Individual's name + +# Please keep the list sorted. + +Alan Braithwaite +Albert Strasheim +Daniel Darabos +Eran Duchan +Evan Shaw +Ian Denhardt +James McKaskill +Jason E. Aten +Johan Hernandez +Joonsung Lee +Lev Radomislensky +Peter Waldschmidt +Ross Light +Tom Thorogood +TJ Holowaychuk +William Laffin diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/LICENSE b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/LICENSE new file mode 100644 index 00000000..3e590a19 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/LICENSE @@ -0,0 +1,25 @@ +go-capnproto is licensed under the terms of the MIT license reproduced below. + +=============================================================================== + +Copyright (C) 2014 the go-capnproto authors and contributors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +=============================================================================== diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/README.md b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/README.md new file mode 100644 index 00000000..47d072b3 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/README.md @@ -0,0 +1,68 @@ +# Cap'n Proto bindings for Go + +[![GoDoc](https://godoc.org/zombiezen.com/go/capnproto2?status.svg)][godoc] +[![Build Status](https://travis-ci.org/capnproto/go-capnproto2.svg?branch=master)][travis] + +go-capnproto consists of: +- a Go code generator for [Cap'n Proto](https://capnproto.org/) +- a Go package that provides runtime support +- a Go package that implements Level 1 of the RPC protocol + +[godoc]: https://godoc.org/zombiezen.com/go/capnproto2 +[travis]: https://travis-ci.org/capnproto/go-capnproto2 + +## Getting started + +You will need the `capnp` tool to compile schemas into Go. +This package has been tested with Cap'n Proto 0.5.0. + +``` +$ go get -u -t zombiezen.com/go/capnproto2/... +$ go test -v zombiezen.com/go/capnproto2/... +``` + +This library uses [SemVer tags][] to indicate stable releases. +While the goal is that master should always be passing all known tests, tagged releases are vetted more. +When possible, use the [latest release tag](https://github.com/capnproto/go-capnproto2/releases). + +``` +$ cd $GOPATH/src/zombiezen.com/go/capnproto2 +$ git fetch +$ git checkout v2.16.0 # check the releases page for the latest +``` + +Then read the [Getting Started guide][]. + +[SemVer tags]: http://semver.org/ +[Getting Started guide]: https://github.com/capnproto/go-capnproto2/wiki/Getting-Started + +## API Compatibility + +Consider this package's API as beta software, since the Cap'n Proto spec is not final. +In the spirit of the [Go 1 compatibility guarantee][gocompat], I will make every effort to avoid making breaking API changes. +The major cases where I reserve the right to make breaking changes are: + +- Security. +- Changes in the Cap'n Proto specification. +- Bugs. + +The `pogs` package is relatively new and may change over time. +However, its functionality has been well-tested and will probably only relax restrictions. + +[gocompat]: https://golang.org/doc/go1compat + +## Documentation + +See the docs on [godoc.org][godoc]. + +## What is Cap'n Proto? + +The best cerealization... 
+ +https://capnproto.org/ + +## License + +MIT - see [LICENSE][] file + +[LICENSE]: https://github.com/capnproto/go-capnproto2/blob/master/LICENSE diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/WORKSPACE b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/WORKSPACE new file mode 100644 index 00000000..9154090a --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/WORKSPACE @@ -0,0 +1,31 @@ +workspace(name = "com_zombiezen_go_capnproto2") + +git_repository( + name = "io_bazel_rules_go", + remote = "https://github.com/bazelbuild/rules_go.git", + commit = "43a3bda3eb97e7bcd86f564a1e0a4b008d6c407c", +) + +load("@io_bazel_rules_go//go:def.bzl", "go_repositories", "go_repository") + +go_repositories() + +go_repository( + name = "com_github_kylelemons_godebug", + importpath = "github.com/kylelemons/godebug", + sha256 = "4415b09bae90e41695bc17e4d00d0708e1f6bbb6e21cc22ce0146a26ddc243a7", + strip_prefix = "godebug-a616ab194758ae0a11290d87ca46ee8c440117b0", + urls = [ + "https://github.com/kylelemons/godebug/archive/a616ab194758ae0a11290d87ca46ee8c440117b0.zip", + ], +) + +go_repository( + name = "org_golang_x_net", + importpath = "golang.org/x/net", + sha256 = "880dc04d0af397dce6875ee2349bbb4295fe5a47352f7a4da4270456f726edd4", + strip_prefix = "net-f5079bd7f6f74e23c4d65efa0f4ce14cbd6a3c0f", + urls = [ + "https://github.com/golang/net/archive/f5079bd7f6f74e23c4d65efa0f4ce14cbd6a3c0f.zip", + ], +) diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/address.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/address.go new file mode 100644 index 00000000..0e06dc49 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/address.go @@ -0,0 +1,116 @@ +package capnp + +// An Address is an index inside a segment's data (in bytes). +type Address uint32 + +// addSize returns the address a+sz. +func (a Address) addSize(sz Size) (b Address, ok bool) { + x := int64(a) + int64(sz) + if x > int64(maxSize) { + return 0, false + } + return Address(x), true +} + +// element returns the address a+i*sz. +func (a Address) element(i int32, sz Size) (b Address, ok bool) { + x := int64(i) * int64(sz) + if x > int64(maxSize) { + return 0, false + } + x += int64(a) + if x > int64(maxSize) { + return 0, false + } + return Address(x), true +} + +// addOffset returns the address a+o. +func (a Address) addOffset(o DataOffset) Address { + return a + Address(o) +} + +// A Size is a size (in bytes). +type Size uint32 + +// wordSize is the number of bytes in a Cap'n Proto word. +const wordSize Size = 8 + +// maxSize is the maximum representable size. +const maxSize Size = 1<<32 - 1 + +// times returns the size sz*n. +func (sz Size) times(n int32) (ns Size, ok bool) { + x := int64(sz) * int64(n) + if x > int64(maxSize) { + return 0, false + } + return Size(x), true +} + +// padToWord adds padding to sz to make it divisible by wordSize. +func (sz Size) padToWord() Size { + n := Size(wordSize - 1) + return (sz + n) &^ n +} + +// DataOffset is an offset in bytes from the beginning of a struct's data section. +type DataOffset uint32 + +// ObjectSize records section sizes for a struct or list. +type ObjectSize struct { + DataSize Size + PointerCount uint16 +} + +// isZero reports whether sz is the zero size. 
+func (sz ObjectSize) isZero() bool { + return sz.DataSize == 0 && sz.PointerCount == 0 +} + +// isOneByte reports whether the object size is one byte (for Text/Data element sizes). +func (sz ObjectSize) isOneByte() bool { + return sz.DataSize == 1 && sz.PointerCount == 0 +} + +// isValid reports whether sz's fields are in range. +func (sz ObjectSize) isValid() bool { + return sz.DataSize <= 0xffff*wordSize +} + +// pointerSize returns the number of bytes the pointer section occupies. +func (sz ObjectSize) pointerSize() Size { + // Guaranteed not to overflow + return wordSize * Size(sz.PointerCount) +} + +// totalSize returns the number of bytes that the object occupies. +func (sz ObjectSize) totalSize() Size { + return sz.DataSize + sz.pointerSize() +} + +// dataWordCount returns the number of words in the data section. +func (sz ObjectSize) dataWordCount() int32 { + if sz.DataSize%wordSize != 0 { + panic("data size not aligned by word") + } + return int32(sz.DataSize / wordSize) +} + +// totalWordCount returns the number of words that the object occupies. +func (sz ObjectSize) totalWordCount() int32 { + return sz.dataWordCount() + int32(sz.PointerCount) +} + +// BitOffset is an offset in bits from the beginning of a struct's data section. +type BitOffset uint32 + +// offset returns the equivalent byte offset. +func (bit BitOffset) offset() DataOffset { + return DataOffset(bit / 8) +} + +// mask returns the bitmask for the bit. +func (bit BitOffset) mask() byte { + return byte(1 << (bit % 8)) +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/canonical.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/canonical.go new file mode 100644 index 00000000..40e5f2ba --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/canonical.go @@ -0,0 +1,161 @@ +package capnp + +import ( + "errors" + "fmt" +) + +// Canonicalize encodes a struct into its canonical form: a single- +// segment blob without a segment table. The result will be identical +// for equivalent structs, even as the schema evolves. The blob is +// suitable for hashing or signing. 
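+//
+// A minimal, hedged usage sketch (not part of this file: `myStruct` is
+// assumed to be any valid Struct, and hashing with crypto/sha256 is only
+// illustrative):
+//
+//	data, err := Canonicalize(myStruct)
+//	if err != nil {
+//		return err
+//	}
+//	digest := sha256.Sum256(data) // equivalent structs yield equal digests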
+func Canonicalize(s Struct) ([]byte, error) { + msg, seg, _ := NewMessage(SingleSegment(nil)) + if !s.IsValid() { + return seg.Data(), nil + } + root, err := NewRootStruct(seg, canonicalStructSize(s)) + if err != nil { + return nil, fmt.Errorf("canonicalize: %v", err) + } + if err := msg.SetRootPtr(root.ToPtr()); err != nil { + return nil, fmt.Errorf("canonicalize: %v", err) + } + if err := fillCanonicalStruct(root, s); err != nil { + return nil, fmt.Errorf("canonicalize: %v", err) + } + return seg.Data(), nil +} + +func canonicalPtr(dst *Segment, p Ptr) (Ptr, error) { + if !p.IsValid() { + return Ptr{}, nil + } + switch p.flags.ptrType() { + case structPtrType: + ss, err := NewStruct(dst, canonicalStructSize(p.Struct())) + if err != nil { + return Ptr{}, err + } + if err := fillCanonicalStruct(ss, p.Struct()); err != nil { + return Ptr{}, err + } + return ss.ToPtr(), nil + case listPtrType: + ll, err := canonicalList(dst, p.List()) + if err != nil { + return Ptr{}, err + } + return ll.ToPtr(), nil + case interfacePtrType: + return Ptr{}, errors.New("cannot canonicalize interface") + default: + panic("unreachable") + } +} + +func fillCanonicalStruct(dst, s Struct) error { + copy(dst.seg.slice(dst.off, dst.size.DataSize), s.seg.slice(s.off, s.size.DataSize)) + for i := uint16(0); i < dst.size.PointerCount; i++ { + p, err := s.Ptr(i) + if err != nil { + return fmt.Errorf("pointer %d: %v", i, err) + } + cp, err := canonicalPtr(dst.seg, p) + if err != nil { + return fmt.Errorf("pointer %d: %v", i, err) + } + if err := dst.SetPtr(i, cp); err != nil { + return fmt.Errorf("pointer %d: %v", i, err) + } + } + return nil +} + +func canonicalStructSize(s Struct) ObjectSize { + if !s.IsValid() { + return ObjectSize{} + } + var sz ObjectSize + // int32 will not overflow because max struct data size is 2^16 words. + for off := int32(s.size.DataSize &^ (wordSize - 1)); off >= 0; off -= int32(wordSize) { + if s.Uint64(DataOffset(off)) != 0 { + sz.DataSize = Size(off) + wordSize + break + } + } + for i := int32(s.size.PointerCount) - 1; i >= 0; i-- { + if s.seg.readRawPointer(s.pointerAddress(uint16(i))) != 0 { + sz.PointerCount = uint16(i + 1) + break + } + } + return sz +} + +func canonicalList(dst *Segment, l List) (List, error) { + if !l.IsValid() { + return List{}, nil + } + if l.size.PointerCount == 0 { + // Data only, just copy over. 
+ sz := l.allocSize() + _, newAddr, err := alloc(dst, sz) + if err != nil { + return List{}, err + } + cl := List{ + seg: dst, + off: newAddr, + length: l.length, + size: l.size, + flags: l.flags, + depthLimit: maxDepth, + } + end, _ := l.off.addSize(sz) // list was already validated + copy(dst.data[newAddr:], l.seg.data[l.off:end]) + return cl, nil + } + if l.flags&isCompositeList == 0 { + cl, err := NewPointerList(dst, l.length) + if err != nil { + return List{}, err + } + for i := 0; i < l.Len(); i++ { + p, err := PointerList{l}.PtrAt(i) + if err != nil { + return List{}, fmt.Errorf("element %d: %v", i, err) + } + cp, err := canonicalPtr(dst, p) + if err != nil { + return List{}, fmt.Errorf("element %d: %v", i, err) + } + if err := cl.SetPtr(i, cp); err != nil { + return List{}, fmt.Errorf("element %d: %v", i, err) + } + } + return cl.List, nil + } + + // Struct/composite list + var elemSize ObjectSize + for i := 0; i < l.Len(); i++ { + sz := canonicalStructSize(l.Struct(i)) + if sz.DataSize > elemSize.DataSize { + elemSize.DataSize = sz.DataSize + } + if sz.PointerCount > elemSize.PointerCount { + elemSize.PointerCount = sz.PointerCount + } + } + cl, err := NewCompositeList(dst, elemSize, l.length) + if err != nil { + return List{}, err + } + for i := 0; i < cl.Len(); i++ { + if err := fillCanonicalStruct(cl.Struct(i), l.Struct(i)); err != nil { + return List{}, fmt.Errorf("element %d: %v", i, err) + } + } + return cl, nil +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/capability.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/capability.go new file mode 100644 index 00000000..d11c680d --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/capability.go @@ -0,0 +1,541 @@ +package capnp + +import ( + "errors" + "strconv" + + "golang.org/x/net/context" +) + +// An Interface is a reference to a client in a message's capability table. +type Interface struct { + seg *Segment + cap CapabilityID +} + +// NewInterface creates a new interface pointer. No allocation is +// performed; s is only used for Segment()'s return value. +func NewInterface(s *Segment, cap CapabilityID) Interface { + return Interface{ + seg: s, + cap: cap, + } +} + +// ToInterface converts p to an Interface. +// +// Deprecated: Use Ptr.Interface. +func ToInterface(p Pointer) Interface { + if !IsValid(p) { + return Interface{} + } + i, ok := p.underlying().(Interface) + if !ok { + return Interface{} + } + return i +} + +// ToPtr converts the interface to a generic pointer. +func (p Interface) ToPtr() Ptr { + return Ptr{ + seg: p.seg, + lenOrCap: uint32(p.cap), + flags: interfacePtrFlag, + } +} + +// Segment returns the segment this pointer came from. +func (i Interface) Segment() *Segment { + return i.seg +} + +// IsValid returns whether the interface is valid. +func (i Interface) IsValid() bool { + return i.seg != nil +} + +// HasData is always true. +func (i Interface) HasData() bool { + return true +} + +// Capability returns the capability ID of the interface. +func (i Interface) Capability() CapabilityID { + return i.cap +} + +// value returns a raw interface pointer with the capability ID. 
+func (i Interface) value(paddr Address) rawPointer { + if i.seg == nil { + return 0 + } + return rawInterfacePointer(i.cap) +} + +func (i Interface) underlying() Pointer { + return i +} + +// Client returns the client stored in the message's capability table +// or nil if the pointer is invalid. +func (i Interface) Client() Client { + if i.seg == nil { + return nil + } + tab := i.seg.msg.CapTable + if int64(i.cap) >= int64(len(tab)) { + return nil + } + return tab[i.cap] +} + +// ErrNullClient is returned from a call made on a null client pointer. +var ErrNullClient = errors.New("capnp: call on null client") + +// A CapabilityID is an index into a message's capability table. +type CapabilityID uint32 + +// A Client represents an Cap'n Proto interface type. It is safe to use +// from multiple goroutines. +// +// Generally, only RPC protocol implementers should provide types that +// implement Client: call ordering guarantees, promises, and +// synchronization are tricky to get right. Prefer creating a server +// that wraps another interface than trying to implement Client. +type Client interface { + // Call starts executing a method and returns an answer that will hold + // the resulting struct. The call's parameters must be placed before + // Call() returns. + // + // Calls are delivered to the capability in the order they are made. + // This guarantee is based on the concept of a capability + // acknowledging delivery of a call: this is specific to an + // implementation of Client. A type that implements Client must + // guarantee that if foo() then bar() is called on a client, that + // acknowledging foo() happens before acknowledging bar(). + Call(call *Call) Answer + + // Close releases any resources associated with this client. + // No further calls to the client should be made after calling Close. + Close() error +} + +// The Call type holds the record for an outgoing interface call. +type Call struct { + // Ctx is the context of the call. + Ctx context.Context + + // Method is the interface ID and method ID, along with the optional name, + // of the method to call. + Method Method + + // Params is a struct containing parameters for the call. + // This should be set when the RPC system receives a call for an + // exported interface. It is mutually exclusive with ParamsFunc + // and ParamsSize. + Params Struct + // ParamsFunc is a function that populates an allocated struct with + // the parameters for the call. ParamsSize determines the size of the + // struct to allocate. This is used when application code is using a + // client. These settings should be set together; they are mutually + // exclusive with Params. + ParamsFunc func(Struct) error + ParamsSize ObjectSize + + // Options passes RPC-specific options for the call. + Options CallOptions +} + +// Copy clones a call, ensuring that its Params are placed. +// If Call.ParamsFunc is nil, then the same Call will be returned. +func (call *Call) Copy(s *Segment) (*Call, error) { + if call.ParamsFunc == nil { + return call, nil + } + p, err := call.PlaceParams(s) + if err != nil { + return nil, err + } + return &Call{ + Ctx: call.Ctx, + Method: call.Method, + Params: p, + Options: call.Options, + }, nil +} + +// PlaceParams returns the parameters struct, allocating it inside +// segment s as necessary. If s is nil, a new single-segment message +// is allocated. 
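+//
+// A hedged sketch of preparing a call on the application side (the method
+// IDs, the 8-byte data section, and the value written below are
+// illustrative assumptions, not part of this file):
+//
+//	call := &Call{
+//		Method:     Method{InterfaceID: 0xdeadbeef, MethodID: 1},
+//		ParamsSize: ObjectSize{DataSize: 8},
+//		ParamsFunc: func(p Struct) error { p.SetUint64(0, 42); return nil },
+//	}
+//	params, err := call.PlaceParams(nil) // nil segment: a fresh single-segment message is allocated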
+func (call *Call) PlaceParams(s *Segment) (Struct, error) { + if call.ParamsFunc == nil { + return call.Params, nil + } + if s == nil { + var err error + _, s, err = NewMessage(SingleSegment(nil)) + if err != nil { + return Struct{}, err + } + } + p, err := NewStruct(s, call.ParamsSize) + if err != nil { + return Struct{}, nil + } + err = call.ParamsFunc(p) + return p, err +} + +// CallOptions holds RPC-specific options for an interface call. +// Its usage is similar to the values in context.Context, but is only +// used for a single call: its values are not intended to propagate to +// other callees. An example of an option would be the +// Call.sendResultsTo field in rpc.capnp. +type CallOptions struct { + m map[interface{}]interface{} +} + +// NewCallOptions builds a CallOptions value from a list of individual options. +func NewCallOptions(opts []CallOption) CallOptions { + co := CallOptions{make(map[interface{}]interface{})} + for _, o := range opts { + o.f(co) + } + return co +} + +// Value retrieves the value associated with the options for this key, +// or nil if no value is associated with this key. +func (co CallOptions) Value(key interface{}) interface{} { + return co.m[key] +} + +// With creates a copy of the CallOptions value with other options applied. +func (co CallOptions) With(opts []CallOption) CallOptions { + newopts := CallOptions{make(map[interface{}]interface{})} + for k, v := range co.m { + newopts.m[k] = v + } + for _, o := range opts { + o.f(newopts) + } + return newopts +} + +// A CallOption is a function that modifies options on an interface call. +type CallOption struct { + f func(CallOptions) +} + +// SetOptionValue returns a call option that associates a value to an +// option key. This can be retrieved later with CallOptions.Value. +func SetOptionValue(key, value interface{}) CallOption { + return CallOption{func(co CallOptions) { + co.m[key] = value + }} +} + +// An Answer is the deferred result of a client call, which is usually wrapped by a Pipeline. +type Answer interface { + // Struct waits until the call is finished and returns the result. + Struct() (Struct, error) + + // The following methods are the same as in Client except with + // an added transform parameter -- a path to the interface to use. + + PipelineCall(transform []PipelineOp, call *Call) Answer + PipelineClose(transform []PipelineOp) error +} + +// A Pipeline is a generic wrapper for an answer. +type Pipeline struct { + answer Answer + parent *Pipeline + op PipelineOp +} + +// NewPipeline returns a new pipeline based on an answer. +func NewPipeline(ans Answer) *Pipeline { + return &Pipeline{answer: ans} +} + +// Answer returns the answer the pipeline is derived from. +func (p *Pipeline) Answer() Answer { + return p.answer +} + +// Transform returns the operations needed to transform the root answer +// into the value p represents. +func (p *Pipeline) Transform() []PipelineOp { + n := 0 + for q := p; q.parent != nil; q = q.parent { + n++ + } + xform := make([]PipelineOp, n) + for i, q := n-1, p; q.parent != nil; i, q = i-1, q.parent { + xform[i] = q.op + } + return xform +} + +// Struct waits until the answer is resolved and returns the struct +// this pipeline represents. +func (p *Pipeline) Struct() (Struct, error) { + s, err := p.answer.Struct() + if err != nil { + return Struct{}, err + } + ptr, err := TransformPtr(s.ToPtr(), p.Transform()) + if err != nil { + return Struct{}, err + } + return ptr.Struct(), nil +} + +// Client returns the client version of p. 
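+//
+// A hedged sketch of promise pipelining (`remote`, `call`, and the use of
+// pointer field 0 are illustrative assumptions):
+//
+//	ans := remote.Call(call)               // returns an Answer immediately
+//	sub := NewPipeline(ans).GetPipeline(0) // pointer field 0 of the eventual result
+//	child := sub.Client()                  // usable before the answer resolves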
+func (p *Pipeline) Client() *PipelineClient { + return (*PipelineClient)(p) +} + +// GetPipeline returns a derived pipeline which yields the pointer field given. +func (p *Pipeline) GetPipeline(off uint16) *Pipeline { + return p.GetPipelineDefault(off, nil) +} + +// GetPipelineDefault returns a derived pipeline which yields the pointer field given, +// defaulting to the value given. +func (p *Pipeline) GetPipelineDefault(off uint16, def []byte) *Pipeline { + return &Pipeline{ + answer: p.answer, + parent: p, + op: PipelineOp{ + Field: off, + DefaultValue: def, + }, + } +} + +// PipelineClient implements Client by calling to the pipeline's answer. +type PipelineClient Pipeline + +func (pc *PipelineClient) transform() []PipelineOp { + return (*Pipeline)(pc).Transform() +} + +// Call calls Answer.PipelineCall with the pipeline's transform. +func (pc *PipelineClient) Call(call *Call) Answer { + return pc.answer.PipelineCall(pc.transform(), call) +} + +// Close calls Answer.PipelineClose with the pipeline's transform. +func (pc *PipelineClient) Close() error { + return pc.answer.PipelineClose(pc.transform()) +} + +// A PipelineOp describes a step in transforming a pipeline. +// It maps closely with the PromisedAnswer.Op struct in rpc.capnp. +type PipelineOp struct { + Field uint16 + DefaultValue []byte +} + +// String returns a human-readable description of op. +func (op PipelineOp) String() string { + s := make([]byte, 0, 32) + s = append(s, "get field "...) + s = strconv.AppendInt(s, int64(op.Field), 10) + if op.DefaultValue == nil { + return string(s) + } + s = append(s, " with default"...) + return string(s) +} + +// A Method identifies a method along with an optional human-readable +// description of the method. +type Method struct { + InterfaceID uint64 + MethodID uint16 + + // Canonical name of the interface. May be empty. + InterfaceName string + // Method name as it appears in the schema. May be empty. + MethodName string +} + +// String returns a formatted string containing the interface name or +// the method name if present, otherwise it uses the raw IDs. +// This is suitable for use in error messages and logs. +func (m *Method) String() string { + buf := make([]byte, 0, 128) + if m.InterfaceName == "" { + buf = append(buf, '@', '0', 'x') + buf = strconv.AppendUint(buf, m.InterfaceID, 16) + } else { + buf = append(buf, m.InterfaceName...) + } + buf = append(buf, '.') + if m.MethodName == "" { + buf = append(buf, '@') + buf = strconv.AppendUint(buf, uint64(m.MethodID), 10) + } else { + buf = append(buf, m.MethodName...) + } + return string(buf) +} + +// Transform applies a sequence of pipeline operations to a pointer +// and returns the result. +// +// Deprecated: Use TransformPtr. +func Transform(p Pointer, transform []PipelineOp) (Pointer, error) { + pp, err := TransformPtr(toPtr(p), transform) + return pp.toPointer(), err +} + +// TransformPtr applies a sequence of pipeline operations to a pointer +// and returns the result. 
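+//
+// For example (a hedged sketch; `root` and the field indices are
+// illustrative assumptions):
+//
+//	xform := []PipelineOp{{Field: 0}, {Field: 2}}
+//	leaf, err := TransformPtr(root.ToPtr(), xform) // follow pointer field 0, then field 2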
+func TransformPtr(p Ptr, transform []PipelineOp) (Ptr, error) { + n := len(transform) + if n == 0 { + return p, nil + } + s := p.Struct() + for _, op := range transform[:n-1] { + field, err := s.Ptr(op.Field) + if err != nil { + return Ptr{}, err + } + s, err = field.StructDefault(op.DefaultValue) + if err != nil { + return Ptr{}, err + } + } + op := transform[n-1] + p, err := s.Ptr(op.Field) + if err != nil { + return Ptr{}, err + } + if op.DefaultValue != nil { + p, err = p.Default(op.DefaultValue) + } + return p, err +} + +type immediateAnswer struct { + s Struct +} + +// ImmediateAnswer returns an Answer that accesses s. +func ImmediateAnswer(s Struct) Answer { + return immediateAnswer{s} +} + +func (ans immediateAnswer) Struct() (Struct, error) { + return ans.s, nil +} + +func (ans immediateAnswer) findClient(transform []PipelineOp) Client { + p, err := TransformPtr(ans.s.ToPtr(), transform) + if err != nil { + return ErrorClient(err) + } + return p.Interface().Client() +} + +func (ans immediateAnswer) PipelineCall(transform []PipelineOp, call *Call) Answer { + c := ans.findClient(transform) + if c == nil { + return ErrorAnswer(ErrNullClient) + } + return c.Call(call) +} + +func (ans immediateAnswer) PipelineClose(transform []PipelineOp) error { + c := ans.findClient(transform) + if c == nil { + return ErrNullClient + } + return c.Close() +} + +type errorAnswer struct { + e error +} + +// ErrorAnswer returns a Answer that always returns error e. +func ErrorAnswer(e error) Answer { + return errorAnswer{e} +} + +func (ans errorAnswer) Struct() (Struct, error) { + return Struct{}, ans.e +} + +func (ans errorAnswer) PipelineCall([]PipelineOp, *Call) Answer { + return ans +} + +func (ans errorAnswer) PipelineClose([]PipelineOp) error { + return ans.e +} + +// IsFixedAnswer reports whether an answer was created by +// ImmediateAnswer or ErrorAnswer. +func IsFixedAnswer(ans Answer) bool { + switch ans.(type) { + case immediateAnswer: + return true + case errorAnswer: + return true + default: + return false + } +} + +type errorClient struct { + e error +} + +// ErrorClient returns a Client that always returns error e. +func ErrorClient(e error) Client { + return errorClient{e} +} + +func (ec errorClient) Call(*Call) Answer { + return ErrorAnswer(ec.e) +} + +func (ec errorClient) Close() error { + return nil +} + +// IsErrorClient reports whether c was created with ErrorClient. +func IsErrorClient(c Client) bool { + _, ok := c.(errorClient) + return ok +} + +// MethodError is an error on an associated method. +type MethodError struct { + Method *Method + Err error +} + +// Error returns the method name concatenated with the error string. +func (e *MethodError) Error() string { + return e.Method.String() + ": " + e.Err.Error() +} + +// ErrUnimplemented is the error returned when a method is called on +// a server that does not implement the method. +var ErrUnimplemented = errors.New("capnp: method not implemented") + +// IsUnimplemented reports whether e indicates an unimplemented method error. 
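+//
+// A typical use is falling back when a server lacks a method (a hedged
+// sketch; `ans` and the fallback behavior are illustrative assumptions):
+//
+//	if _, err := ans.Struct(); IsUnimplemented(err) {
+//		// use a local default instead of the remote result
+//	}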
+func IsUnimplemented(e error) bool { + if me, ok := e.(*MethodError); ok { + e = me.Err + } + return e == ErrUnimplemented +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/capn.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/capn.go new file mode 100644 index 00000000..6de4c836 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/capn.go @@ -0,0 +1,427 @@ +package capnp + +import ( + "encoding/binary" + "errors" +) + +// A SegmentID is a numeric identifier for a Segment. +type SegmentID uint32 + +// A Segment is an allocation arena for Cap'n Proto objects. +// It is part of a Message, which can contain other segments that +// reference each other. +type Segment struct { + msg *Message + id SegmentID + data []byte +} + +// Message returns the message that contains s. +func (s *Segment) Message() *Message { + return s.msg +} + +// ID returns the segment's ID. +func (s *Segment) ID() SegmentID { + return s.id +} + +// Data returns the raw byte slice for the segment. +func (s *Segment) Data() []byte { + return s.data +} + +func (s *Segment) inBounds(addr Address) bool { + return addr < Address(len(s.data)) +} + +func (s *Segment) regionInBounds(base Address, sz Size) bool { + end, ok := base.addSize(sz) + if !ok { + return false + } + return end <= Address(len(s.data)) +} + +// slice returns the segment of data from base to base+sz. +func (s *Segment) slice(base Address, sz Size) []byte { + // Bounds check should have happened before calling slice. + return s.data[base : base+Address(sz)] +} + +func (s *Segment) readUint8(addr Address) uint8 { + return s.slice(addr, 1)[0] +} + +func (s *Segment) readUint16(addr Address) uint16 { + return binary.LittleEndian.Uint16(s.slice(addr, 2)) +} + +func (s *Segment) readUint32(addr Address) uint32 { + return binary.LittleEndian.Uint32(s.slice(addr, 4)) +} + +func (s *Segment) readUint64(addr Address) uint64 { + return binary.LittleEndian.Uint64(s.slice(addr, 8)) +} + +func (s *Segment) readRawPointer(addr Address) rawPointer { + return rawPointer(s.readUint64(addr)) +} + +func (s *Segment) writeUint8(addr Address, val uint8) { + s.slice(addr, 1)[0] = val +} + +func (s *Segment) writeUint16(addr Address, val uint16) { + binary.LittleEndian.PutUint16(s.slice(addr, 2), val) +} + +func (s *Segment) writeUint32(addr Address, val uint32) { + binary.LittleEndian.PutUint32(s.slice(addr, 4), val) +} + +func (s *Segment) writeUint64(addr Address, val uint64) { + binary.LittleEndian.PutUint64(s.slice(addr, 8), val) +} + +func (s *Segment) writeRawPointer(addr Address, val rawPointer) { + s.writeUint64(addr, uint64(val)) +} + +// root returns a 1-element pointer list that references the first word +// in the segment. This only makes sense to call on the first segment +// in a message. 
+func (s *Segment) root() PointerList { + sz := ObjectSize{PointerCount: 1} + if !s.regionInBounds(0, sz.totalSize()) { + return PointerList{} + } + return PointerList{List{ + seg: s, + length: 1, + size: sz, + depthLimit: s.msg.depthLimit(), + }} +} + +func (s *Segment) lookupSegment(id SegmentID) (*Segment, error) { + if s.id == id { + return s, nil + } + return s.msg.Segment(id) +} + +func (s *Segment) readPtr(paddr Address, depthLimit uint) (ptr Ptr, err error) { + s, base, val, err := s.resolveFarPointer(paddr) + if err != nil { + return Ptr{}, err + } + if val == 0 { + return Ptr{}, nil + } + if depthLimit == 0 { + return Ptr{}, errDepthLimit + } + switch val.pointerType() { + case structPointer: + sp, err := s.readStructPtr(base, val) + if err != nil { + return Ptr{}, err + } + if !s.msg.ReadLimiter().canRead(sp.readSize()) { + return Ptr{}, errReadLimit + } + sp.depthLimit = depthLimit - 1 + return sp.ToPtr(), nil + case listPointer: + lp, err := s.readListPtr(base, val) + if err != nil { + return Ptr{}, err + } + if !s.msg.ReadLimiter().canRead(lp.readSize()) { + return Ptr{}, errReadLimit + } + lp.depthLimit = depthLimit - 1 + return lp.ToPtr(), nil + case otherPointer: + if val.otherPointerType() != 0 { + return Ptr{}, errOtherPointer + } + return Interface{ + seg: s, + cap: val.capabilityIndex(), + }.ToPtr(), nil + default: + // Only other types are far pointers. + return Ptr{}, errBadLandingPad + } +} + +func (s *Segment) readStructPtr(base Address, val rawPointer) (Struct, error) { + addr, ok := val.offset().resolve(base) + if !ok { + return Struct{}, errPointerAddress + } + sz := val.structSize() + if !s.regionInBounds(addr, sz.totalSize()) { + return Struct{}, errPointerAddress + } + return Struct{ + seg: s, + off: addr, + size: sz, + }, nil +} + +func (s *Segment) readListPtr(base Address, val rawPointer) (List, error) { + addr, ok := val.offset().resolve(base) + if !ok { + return List{}, errPointerAddress + } + lsize, ok := val.totalListSize() + if !ok { + return List{}, errOverflow + } + if !s.regionInBounds(addr, lsize) { + return List{}, errPointerAddress + } + lt := val.listType() + if lt == compositeList { + hdr := s.readRawPointer(addr) + var ok bool + addr, ok = addr.addSize(wordSize) + if !ok { + return List{}, errOverflow + } + if hdr.pointerType() != structPointer { + return List{}, errBadTag + } + sz := hdr.structSize() + n := int32(hdr.offset()) + // TODO(light): check that this has the same end address + if tsize, ok := sz.totalSize().times(n); !ok { + return List{}, errOverflow + } else if !s.regionInBounds(addr, tsize) { + return List{}, errPointerAddress + } + return List{ + seg: s, + size: sz, + off: addr, + length: n, + flags: isCompositeList, + }, nil + } + if lt == bit1List { + return List{ + seg: s, + off: addr, + length: val.numListElements(), + flags: isBitList, + }, nil + } + return List{ + seg: s, + size: val.elementSize(), + off: addr, + length: val.numListElements(), + }, nil +} + +func (s *Segment) resolveFarPointer(paddr Address) (dst *Segment, base Address, resolved rawPointer, err error) { + // Encoding details at https://capnproto.org/encoding.html#inter-segment-pointers + + val := s.readRawPointer(paddr) + switch val.pointerType() { + case doubleFarPointer: + padSeg, err := s.lookupSegment(val.farSegment()) + if err != nil { + return nil, 0, 0, err + } + padAddr := val.farAddress() + if !padSeg.regionInBounds(padAddr, wordSize*2) { + return nil, 0, 0, errPointerAddress + } + far := padSeg.readRawPointer(padAddr) + if far.pointerType() != 
farPointer { + return nil, 0, 0, errBadLandingPad + } + tagAddr, ok := padAddr.addSize(wordSize) + if !ok { + return nil, 0, 0, errOverflow + } + tag := padSeg.readRawPointer(tagAddr) + if pt := tag.pointerType(); (pt != structPointer && pt != listPointer) || tag.offset() != 0 { + return nil, 0, 0, errBadLandingPad + } + if dst, err = s.lookupSegment(far.farSegment()); err != nil { + return nil, 0, 0, err + } + return dst, 0, landingPadNearPointer(far, tag), nil + case farPointer: + var err error + dst, err = s.lookupSegment(val.farSegment()) + if err != nil { + return nil, 0, 0, err + } + padAddr := val.farAddress() + if !dst.regionInBounds(padAddr, wordSize) { + return nil, 0, 0, errPointerAddress + } + var ok bool + base, ok = padAddr.addSize(wordSize) + if !ok { + return nil, 0, 0, errOverflow + } + return dst, base, dst.readRawPointer(padAddr), nil + default: + var ok bool + base, ok = paddr.addSize(wordSize) + if !ok { + return nil, 0, 0, errOverflow + } + return s, base, val, nil + } +} + +func (s *Segment) writePtr(off Address, src Ptr, forceCopy bool) error { + if !src.IsValid() { + s.writeRawPointer(off, 0) + return nil + } + + // Copy src, if needed, and process pointers where placement is + // irrelevant (capabilities and zero-sized structs). + var srcAddr Address + var srcRaw rawPointer + switch src.flags.ptrType() { + case structPtrType: + st := src.Struct() + if st.size.isZero() { + // Zero-sized structs should always be encoded with offset -1 in + // order to avoid conflating with null. No allocation needed. + s.writeRawPointer(off, rawStructPointer(-1, ObjectSize{})) + return nil + } + if forceCopy || src.seg.msg != s.msg || st.flags&isListMember != 0 { + newSeg, newAddr, err := alloc(s, st.size.totalSize()) + if err != nil { + return err + } + dst := Struct{ + seg: newSeg, + off: newAddr, + size: st.size, + depthLimit: maxDepth, + // clear flags + } + if err := copyStruct(dst, st); err != nil { + return err + } + st = dst + src = dst.ToPtr() + } + srcAddr = st.off + srcRaw = rawStructPointer(0, st.size) + case listPtrType: + l := src.List() + if forceCopy || src.seg.msg != s.msg { + sz := l.allocSize() + newSeg, newAddr, err := alloc(s, sz) + if err != nil { + return err + } + dst := List{ + seg: newSeg, + off: newAddr, + length: l.length, + size: l.size, + flags: l.flags, + depthLimit: maxDepth, + } + if dst.flags&isCompositeList != 0 { + // Copy tag word + newSeg.writeRawPointer(newAddr, l.seg.readRawPointer(l.off-Address(wordSize))) + var ok bool + dst.off, ok = dst.off.addSize(wordSize) + if !ok { + return errOverflow + } + sz -= wordSize + } + if dst.flags&isBitList != 0 || dst.size.PointerCount == 0 { + end, _ := l.off.addSize(sz) // list was already validated + copy(newSeg.data[dst.off:], l.seg.data[l.off:end]) + } else { + for i := 0; i < l.Len(); i++ { + err := copyStruct(dst.Struct(i), l.Struct(i)) + if err != nil { + return err + } + } + } + l = dst + src = dst.ToPtr() + } + srcAddr = l.off + if l.flags&isCompositeList != 0 { + srcAddr -= Address(wordSize) + } + srcRaw = l.raw() + case interfacePtrType: + i := src.Interface() + if src.seg.msg != s.msg { + c := s.msg.AddCap(i.Client()) + i = NewInterface(s, c) + } + s.writeRawPointer(off, i.value(off)) + return nil + default: + panic("unreachable") + } + + switch { + case src.seg == s: + // Common case: src is in same segment as pointer. + // Use a near pointer. 
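+		// (Per the Cap'n Proto encoding, the offset stored in a near pointer is
+		// measured in words from the end of the pointer word to the target.)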
+ s.writeRawPointer(off, srcRaw.withOffset(nearPointerOffset(off, srcAddr))) + return nil + case hasCapacity(src.seg.data, wordSize): + // Enough room adjacent to src to write a far pointer landing pad. + _, padAddr, _ := alloc(src.seg, wordSize) + src.seg.writeRawPointer(padAddr, srcRaw.withOffset(nearPointerOffset(padAddr, srcAddr))) + s.writeRawPointer(off, rawFarPointer(src.seg.id, padAddr)) + return nil + default: + // Not enough room for a landing pad, need to use a double-far pointer. + padSeg, padAddr, err := alloc(s, wordSize*2) + if err != nil { + return err + } + padSeg.writeRawPointer(padAddr, rawFarPointer(src.seg.id, srcAddr)) + padSeg.writeRawPointer(padAddr+Address(wordSize), srcRaw) + s.writeRawPointer(off, rawDoubleFarPointer(padSeg.id, padAddr)) + return nil + } +} + +var ( + errPointerAddress = errors.New("capnp: invalid pointer address") + errBadLandingPad = errors.New("capnp: invalid far pointer landing pad") + errBadTag = errors.New("capnp: invalid tag word") + errOtherPointer = errors.New("capnp: unknown pointer type") + errObjectSize = errors.New("capnp: invalid object size") + errElementSize = errors.New("capnp: mismatched list element size") + errReadLimit = errors.New("capnp: read traversal limit reached") + errDepthLimit = errors.New("capnp: depth limit reached") +) + +var ( + errOverflow = errors.New("capnp: address or size overflow") + errOutOfBounds = errors.New("capnp: address out of bounds") + errCopyDepth = errors.New("capnp: copy depth too large") + errOverlap = errors.New("capnp: overlapping data on copy") + errListSize = errors.New("capnp: invalid list size") +) diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/doc.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/doc.go new file mode 100644 index 00000000..ef7f6497 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/doc.go @@ -0,0 +1,384 @@ +/* +Package capnp is a Cap'n Proto library for Go. +https://capnproto.org/ + +Read the Getting Started guide for a tutorial on how to use this +package. https://github.com/capnproto/go-capnproto2/wiki/Getting-Started + +Generating code + +capnpc-go provides the compiler backend for capnp. + + # First, install capnpc-go to $PATH. + go install zombiezen.com/go/capnproto2/capnpc-go + # Then, generate Go files. + capnp compile -I$GOPATH/src/zombiezen.com/go/capnproto2/std -ogo *.capnp + +capnpc-go requires two annotations for all files: package and import. +package is needed to know what package to place at the head of the +generated file and what identifier to use when referring to the type +from another package. import should be the fully qualified import path +and is used to generate import statement from other packages and to +detect when two types are in the same package. For example: + + using Go = import "/go.capnp"; + $Go.package("main"); + $Go.import("zombiezen.com/go/capnproto2/example"); + +For adding documentation comments to the generated code, there's the doc +annotation. This annotation adds the comment to a struct, enum or field so +that godoc will pick it up. For example: + + struct Zdate $Go.doc("Zdate represents a calendar date") { + year @0 :Int16; + month @1 :UInt8; + day @2 :UInt8 ; + } + +Messages and Segments + +In Cap'n Proto, the unit of communication is a message. A message +consists of one or more segments -- contiguous blocks of memory. 
This +allows large messages to be split up and loaded independently or lazily. +Typically you will use one segment per message. Logically, a message is +organized in a tree of objects, with the root always being a struct (as +opposed to a list or primitive). Messages can be read from and written +to a stream. + +The Message and Segment types are the main types that application code +will use from this package. The Message type has methods for marshaling +and unmarshaling its segments to the wire format. If the application +needs to read or write from a stream, it should use the Encoder and +Decoder types. + +Pointers + +The type for a generic reference to a Cap'n Proto object is Ptr. A Ptr +can refer to a struct, a list, or an interface. Ptr, Struct, List, and +Interface (the pointer types) have value semantics and refer to data in +a single segment. All of the pointer types have a notion of "valid". +An invalid pointer will return the default value from any accessor and +panic when any setter is called. + +In previous versions of this package, the Pointer interface was used +instead of the Ptr struct. This interface and functions that use it are +now deprecated. See https://github.com/capnproto/go-capnproto2/wiki/New-Ptr-Type +for details about this API change. + +Data accessors and setters (i.e. struct primitive fields and list +elements) do not return errors, but pointer accessors and setters do. +There are a few reasons that a read or write of a pointer can fail, but +the most common are bad pointers or allocation failures. For accessors, +an invalid object will be returned in case of an error. + +Since Go doesn't have generics, wrapper types provide type safety on +lists. This package provides lists of basic types, and capnpc-go +generates list wrappers for named types. However, if you need to use +deeper nesting of lists (e.g. List(List(UInt8))), you will need to use a +PointerList and wrap the elements. + +Structs + +For the following schema: + +struct Foo @0x8423424e9b01c0af { + num @0 :UInt32; + bar @1 :Foo; +} + +capnpc-go will generate: + + // Foo is a pointer to a Foo struct in a segment. + // Member functions are provided to get/set members in the + // struct. + type Foo struct{ capnp.Struct } + + // Foo_TypeID is the unique identifier for the type Foo. + // It remains the same across languages and schema changes. + const Foo_TypeID = 0x8423424e9b01c0af + + // NewFoo creates a new orphaned Foo struct, preferring placement in + // s. If there isn't enough space, then another segment in the + // message will be used or allocated. You can set a field of type Foo + // to this new message, but usually you will want to use the + // NewBar()-style method shown below. + func NewFoo(s *capnp.Segment) (Foo, error) + + // NewRootFoo creates a new Foo struct and sets the message's root to + // it. + func NewRootFoo(s *capnp.Segment) (Foo, error) + + // ReadRootFoo reads the message's root pointer and converts it to a + // Foo struct. + func ReadRootFoo(msg *capnp.Message) (Foo, error) + + // Num returns the value of the num field. + func (s Foo) Num() uint32 + + // SetNum sets the value of the num field to v. + func (s Foo) SetNum(v uint32) + + // Bar returns the value of the bar field. This can return an error + // if the pointer goes beyond the segment's range, the segment fails + // to load, or the pointer recursion limit has been reached. + func (s Foo) Bar() (Foo, error) + + // HasBar reports whether the bar field was initialized (non-null). 
+ func (s Foo) HasBar() bool + + // SetBar sets the value of the bar field to v. + func (s Foo) SetBar(v Foo) error + + // NewBar sets the bar field to a newly allocated Foo struct, + // preferring placement in s's segment. + func (s Foo) NewBar() (Foo, error) + + // Foo_List is a value with pointer semantics. It is created for all + // structs, and is used for List(Foo) in the capnp file. + type Foo_List struct{ capnp.List } + + // NewFoo_List creates a new orphaned List(Foo), preferring placement + // in s. This can then be added to a message by using a Set function + // which takes a Foo_List. sz specifies the number of elements in the + // list. The list's size cannot be changed after creation. + func NewFoo_List(s *capnp.Segment, sz int32) Foo_List + + // Len returns the number of elements in the list. + func (s Foo_List) Len() int + + // At returns a pointer to the i'th element. If i is an invalid index, + // this will return an invalid Foo (all getters will return default + // values, setters will fail). + func (s Foo_List) At(i int) Foo + + // Foo_Promise is a promise for a Foo. Methods are provided to get + // promises of struct and interface fields. + type Foo_Promise struct{ *capnp.Pipeline } + + // Get waits until the promise is resolved and returns the result. + func (p Foo_Promise) Get() (Foo, error) + + // Bar returns a promise for that bar field. + func (p Foo_Promise) Bar() Foo_Promise + + +Groups + +For each group a typedef is created with a different method set for just the +groups fields: + + struct Foo { + group :Group { + field @0 :Bool; + } + } + +generates the following: + + type Foo struct{ capnp.Struct } + type Foo_group Foo + + func (s Foo) Group() Foo_group + func (s Foo_group) Field() bool + +That way the following may be used to access a field in a group: + + var f Foo + value := f.Group().Field() + +Note that group accessors just convert the type and so have no overhead. + +Unions + +Named unions are treated as a group with an inner unnamed union. Unnamed +unions generate an enum Type_Which and a corresponding Which() function: + + struct Foo { + union { + a @0 :Bool; + b @1 :Bool; + } + } + +generates the following: + + type Foo_Which uint16 + + const ( + Foo_Which_a Foo_Which = 0 + Foo_Which_b Foo_Which = 1 + ) + + func (s Foo) A() bool + func (s Foo) B() bool + func (s Foo) SetA(v bool) + func (s Foo) SetB(v bool) + func (s Foo) Which() Foo_Which + +Which() should be checked before using the getters, and the default case must +always be handled. + +Setters for single values will set the union discriminator as well as set the +value. + +For voids in unions, there is a void setter that just sets the discriminator. +For example: + + struct Foo { + union { + a @0 :Void; + b @1 :Void; + } + } + +generates the following: + + func (s Foo) SetA() // Set that we are using A + func (s Foo) SetB() // Set that we are using B + +Similarly, for groups in unions, there is a group setter that just sets +the discriminator. This must be called before the group getter can be +used to set values. For example: + + struct Foo { + union { + a :group { + v :Bool + } + b :group { + v :Bool + } + } + } + +and in usage: + + f.SetA() // Set that we are using group A + f.A().SetV(true) // then we can use the group A getter to set the inner values + +Enums + +capnpc-go generates enum values as constants. 
For example in the capnp file: + + enum ElementSize { + empty @0; + bit @1; + byte @2; + twoBytes @3; + fourBytes @4; + eightBytes @5; + pointer @6; + inlineComposite @7; + } + +In the generated capnp.go file: + + type ElementSize uint16 + + const ( + ElementSize_empty ElementSize = 0 + ElementSize_bit ElementSize = 1 + ElementSize_byte ElementSize = 2 + ElementSize_twoBytes ElementSize = 3 + ElementSize_fourBytes ElementSize = 4 + ElementSize_eightBytes ElementSize = 5 + ElementSize_pointer ElementSize = 6 + ElementSize_inlineComposite ElementSize = 7 + ) + +In addition an enum.String() function is generated that will convert the constants to a string +for debugging or logging purposes. By default, the enum name is used as the tag value, +but the tags can be customized with a $Go.tag or $Go.notag annotation. + +For example: + + enum ElementSize { + empty @0 $Go.tag("void"); + bit @1 $Go.tag("1 bit"); + byte @2 $Go.tag("8 bits"); + inlineComposite @7 $Go.notag; + } + +In the generated go file: + + func (c ElementSize) String() string { + switch c { + case ElementSize_empty: + return "void" + case ElementSize_bit: + return "1 bit" + case ElementSize_byte: + return "8 bits" + default: + return "" + } + } + +Interfaces + +capnpc-go generates type-safe Client wrappers for interfaces. For parameter +lists and result lists, structs are generated as described above with the names +Interface_method_Params and Interface_method_Results, unless a single struct +type is used. For example, for this interface: + + interface Calculator { + evaluate @0 (expression :Expression) -> (value :Value); + } + +capnpc-go generates the following Go code (along with the structs +Calculator_evaluate_Params and Calculator_evaluate_Results): + + // Calculator is a client to a Calculator interface. + type Calculator struct{ Client capnp.Client } + + // Evaluate calls `evaluate` on the client. params is called on a newly + // allocated Calculator_evaluate_Params struct to fill in the parameters. + func (c Calculator) Evaluate( + ctx context.Context, + params func(Calculator_evaluate_Params) error, + opts ...capnp.CallOption) *Calculator_evaluate_Results_Promise + +capnpc-go also generates code to implement the interface: + + // A Calculator_Server implements the Calculator interface. + type Calculator_Server interface { + Evaluate(Calculator_evaluate_Call) error + } + + // Calculator_evaluate_Call holds the arguments for a Calculator.evaluate server call. + type Calculator_evaluate_Call struct { + Ctx context.Context + Options capnp.CallOptions + Params Calculator_evaluate_Params + Results Calculator_evaluate_Results + } + + // Calculator_ServerToClient is equivalent to calling: + // NewCalculator(capnp.NewServer(Calculator_Methods(nil, s), s)) + // If s does not implement the Close method, then nil is used. + func Calculator_ServerToClient(s Calculator_Server) Calculator + + // Calculator_Methods appends methods from Calculator that call to server and + // returns the methods. If methods is nil or the capacity of the underlying + // slice is too small, a new slice is returned. + func Calculator_Methods(methods []server.Method, s Calculator_Server) []server.Method + +Since a single capability may want to implement many interfaces, you can +use multiple *_Methods functions to build a single slice to send to +NewServer. 
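+
+If one server value implements several interfaces, the generated *_Methods
+functions can be chained to build that single slice (an illustrative sketch;
+Adder is a hypothetical second interface generated the same way as Calculator,
+and srv implements both Calculator_Server and the hypothetical Adder_Server):
+
+    // Start with Calculator's methods, then append Adder's.
+    // Adder_Methods is hypothetical; it follows the same generated pattern
+    // as Calculator_Methods shown above.
+    methods := Calculator_Methods(nil, srv)
+    methods = Adder_Methods(methods, srv)
+    // Pass the combined slice to the server constructor described above.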
+ +An example of combining the client/server code to communicate with a locally +implemented Calculator: + + var srv Calculator_Server + calc := Calculator_ServerToClient(srv) + result := calc.Evaluate(ctx, func(params Calculator_evaluate_Params) { + params.SetExpression(expr) + }) + val := result.Value().Get() + +A note about message ordering: when implementing a server method, you +are responsible for acknowledging delivery of a method call. Failure to +do so can cause deadlocks. See the server.Ack function for more details. +*/ +package capnp // import "zombiezen.com/go/capnproto2" diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/encoding/text/BUILD.bazel b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/encoding/text/BUILD.bazel new file mode 100644 index 00000000..ec355034 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/encoding/text/BUILD.bazel @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["marshal.go"], + visibility = ["//visibility:public"], + deps = [ + "//:go_default_library", + "//internal/nodemap:go_default_library", + "//internal/schema:go_default_library", + "//internal/strquote:go_default_library", + "//schemas:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["marshal_test.go"], + data = glob(["testdata/**"]), + library = ":go_default_library", + deps = [ + "//:go_default_library", + "//internal/schema:go_default_library", + "//schemas:go_default_library", + ], +) diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/encoding/text/marshal.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/encoding/text/marshal.go new file mode 100644 index 00000000..48cdaf57 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/encoding/text/marshal.go @@ -0,0 +1,455 @@ +// Package text supports marshaling Cap'n Proto messages as text based on a schema. +package text + +import ( + "bytes" + "fmt" + "io" + "math" + "strconv" + + "zombiezen.com/go/capnproto2" + "zombiezen.com/go/capnproto2/internal/nodemap" + "zombiezen.com/go/capnproto2/internal/schema" + "zombiezen.com/go/capnproto2/internal/strquote" + "zombiezen.com/go/capnproto2/schemas" +) + +// Marker strings. +const ( + voidMarker = "void" + interfaceMarker = "" + anyPointerMarker = "" +) + +// Marshal returns the text representation of a struct. +func Marshal(typeID uint64, s capnp.Struct) (string, error) { + buf := new(bytes.Buffer) + if err := NewEncoder(buf).Encode(typeID, s); err != nil { + return "", err + } + return buf.String(), nil +} + +// MarshalList returns the text representation of a struct list. +func MarshalList(typeID uint64, l capnp.List) (string, error) { + buf := new(bytes.Buffer) + if err := NewEncoder(buf).EncodeList(typeID, l); err != nil { + return "", err + } + return buf.String(), nil +} + +// An Encoder writes the text format of Cap'n Proto messages to an output stream. +type Encoder struct { + w errWriter + tmp []byte + nodes nodemap.Map +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: errWriter{w: w}} +} + +// UseRegistry changes the registry that the encoder consults for +// schemas from the default registry. 
+func (enc *Encoder) UseRegistry(reg *schemas.Registry) { + enc.nodes.UseRegistry(reg) +} + +// Encode writes the text representation of s to the stream. +func (enc *Encoder) Encode(typeID uint64, s capnp.Struct) error { + if enc.w.err != nil { + return enc.w.err + } + err := enc.marshalStruct(typeID, s) + if err != nil { + return err + } + return enc.w.err +} + +// EncodeList writes the text representation of struct list l to the stream. +func (enc *Encoder) EncodeList(typeID uint64, l capnp.List) error { + _, seg, _ := capnp.NewMessage(capnp.SingleSegment(nil)) + typ, _ := schema.NewRootType(seg) + typ.SetStructType() + typ.StructType().SetTypeId(typeID) + return enc.marshalList(typ, l) +} + +func (enc *Encoder) marshalBool(v bool) { + if v { + enc.w.WriteString("true") + } else { + enc.w.WriteString("false") + } +} + +func (enc *Encoder) marshalInt(i int64) { + enc.tmp = strconv.AppendInt(enc.tmp[:0], i, 10) + enc.w.Write(enc.tmp) +} + +func (enc *Encoder) marshalUint(i uint64) { + enc.tmp = strconv.AppendUint(enc.tmp[:0], i, 10) + enc.w.Write(enc.tmp) +} + +func (enc *Encoder) marshalFloat32(f float32) { + enc.tmp = strconv.AppendFloat(enc.tmp[:0], float64(f), 'g', -1, 32) + enc.w.Write(enc.tmp) +} + +func (enc *Encoder) marshalFloat64(f float64) { + enc.tmp = strconv.AppendFloat(enc.tmp[:0], f, 'g', -1, 64) + enc.w.Write(enc.tmp) +} + +func (enc *Encoder) marshalText(t []byte) { + enc.tmp = strquote.Append(enc.tmp[:0], t) + enc.w.Write(enc.tmp) +} + +func needsEscape(b byte) bool { + return b < 0x20 || b >= 0x7f +} + +func hexDigit(b byte) byte { + const digits = "0123456789abcdef" + return digits[b] +} + +func (enc *Encoder) marshalStruct(typeID uint64, s capnp.Struct) error { + n, err := enc.nodes.Find(typeID) + if err != nil { + return err + } + if !n.IsValid() || n.Which() != schema.Node_Which_structNode { + return fmt.Errorf("cannot find struct type %#x", typeID) + } + var discriminant uint16 + if n.StructNode().DiscriminantCount() > 0 { + discriminant = s.Uint16(capnp.DataOffset(n.StructNode().DiscriminantOffset() * 2)) + } + enc.w.WriteByte('(') + fields := codeOrderFields(n.StructNode()) + first := true + for _, f := range fields { + if !(f.Which() == schema.Field_Which_slot || f.Which() == schema.Field_Which_group) { + continue + } + if dv := f.DiscriminantValue(); !(dv == schema.Field_noDiscriminant || dv == discriminant) { + continue + } + if !first { + enc.w.WriteString(", ") + } + first = false + name, err := f.NameBytes() + if err != nil { + return err + } + enc.w.Write(name) + enc.w.WriteString(" = ") + switch f.Which() { + case schema.Field_Which_slot: + if err := enc.marshalFieldValue(s, f); err != nil { + return err + } + case schema.Field_Which_group: + if err := enc.marshalStruct(f.Group().TypeId(), s); err != nil { + return err + } + } + } + enc.w.WriteByte(')') + return nil +} + +func (enc *Encoder) marshalFieldValue(s capnp.Struct, f schema.Field) error { + typ, err := f.Slot().Type() + if err != nil { + return err + } + dv, err := f.Slot().DefaultValue() + if err != nil { + return err + } + if dv.IsValid() && int(typ.Which()) != int(dv.Which()) { + name, _ := f.Name() + return fmt.Errorf("marshal field %s: default value is a %v, want %v", name, dv.Which(), typ.Which()) + } + switch typ.Which() { + case schema.Type_Which_void: + enc.w.WriteString(voidMarker) + case schema.Type_Which_bool: + v := s.Bit(capnp.BitOffset(f.Slot().Offset())) + d := dv.Bool() + enc.marshalBool(!d && v || d && !v) + case schema.Type_Which_int8: + v := 
s.Uint8(capnp.DataOffset(f.Slot().Offset())) + d := uint8(dv.Int8()) + enc.marshalInt(int64(int8(v ^ d))) + case schema.Type_Which_int16: + v := s.Uint16(capnp.DataOffset(f.Slot().Offset() * 2)) + d := uint16(dv.Int16()) + enc.marshalInt(int64(int16(v ^ d))) + case schema.Type_Which_int32: + v := s.Uint32(capnp.DataOffset(f.Slot().Offset() * 4)) + d := uint32(dv.Int32()) + enc.marshalInt(int64(int32(v ^ d))) + case schema.Type_Which_int64: + v := s.Uint64(capnp.DataOffset(f.Slot().Offset() * 8)) + d := uint64(dv.Int64()) + enc.marshalInt(int64(v ^ d)) + case schema.Type_Which_uint8: + v := s.Uint8(capnp.DataOffset(f.Slot().Offset())) + d := dv.Uint8() + enc.marshalUint(uint64(v ^ d)) + case schema.Type_Which_uint16: + v := s.Uint16(capnp.DataOffset(f.Slot().Offset() * 2)) + d := dv.Uint16() + enc.marshalUint(uint64(v ^ d)) + case schema.Type_Which_uint32: + v := s.Uint32(capnp.DataOffset(f.Slot().Offset() * 4)) + d := dv.Uint32() + enc.marshalUint(uint64(v ^ d)) + case schema.Type_Which_uint64: + v := s.Uint64(capnp.DataOffset(f.Slot().Offset() * 8)) + d := dv.Uint64() + enc.marshalUint(v ^ d) + case schema.Type_Which_float32: + v := s.Uint32(capnp.DataOffset(f.Slot().Offset() * 4)) + d := math.Float32bits(dv.Float32()) + enc.marshalFloat32(math.Float32frombits(v ^ d)) + case schema.Type_Which_float64: + v := s.Uint64(capnp.DataOffset(f.Slot().Offset() * 8)) + d := math.Float64bits(dv.Float64()) + enc.marshalFloat64(math.Float64frombits(v ^ d)) + case schema.Type_Which_structType: + p, err := s.Ptr(uint16(f.Slot().Offset())) + if err != nil { + return err + } + if !p.IsValid() { + p, _ = dv.StructValuePtr() + } + return enc.marshalStruct(typ.StructType().TypeId(), p.Struct()) + case schema.Type_Which_data: + p, err := s.Ptr(uint16(f.Slot().Offset())) + if err != nil { + return err + } + if !p.IsValid() { + b, _ := dv.Data() + enc.marshalText(b) + return nil + } + enc.marshalText(p.Data()) + case schema.Type_Which_text: + p, err := s.Ptr(uint16(f.Slot().Offset())) + if err != nil { + return err + } + if !p.IsValid() { + b, _ := dv.TextBytes() + enc.marshalText(b) + return nil + } + enc.marshalText(p.TextBytes()) + case schema.Type_Which_list: + elem, err := typ.List().ElementType() + if err != nil { + return err + } + p, err := s.Ptr(uint16(f.Slot().Offset())) + if err != nil { + return err + } + if !p.IsValid() { + p, _ = dv.ListPtr() + } + return enc.marshalList(elem, p.List()) + case schema.Type_Which_enum: + v := s.Uint16(capnp.DataOffset(f.Slot().Offset() * 2)) + d := dv.Uint16() + return enc.marshalEnum(typ.Enum().TypeId(), v^d) + case schema.Type_Which_interface: + enc.w.WriteString(interfaceMarker) + case schema.Type_Which_anyPointer: + enc.w.WriteString(anyPointerMarker) + default: + return fmt.Errorf("unknown field type %v", typ.Which()) + } + return nil +} + +func codeOrderFields(s schema.Node_structNode) []schema.Field { + list, _ := s.Fields() + n := list.Len() + fields := make([]schema.Field, n) + for i := 0; i < n; i++ { + f := list.At(i) + fields[f.CodeOrder()] = f + } + return fields +} + +func (enc *Encoder) marshalList(elem schema.Type, l capnp.List) error { + switch elem.Which() { + case schema.Type_Which_void: + enc.w.WriteString(capnp.VoidList{List: l}.String()) + case schema.Type_Which_bool: + enc.w.WriteString(capnp.BitList{List: l}.String()) + case schema.Type_Which_int8: + enc.w.WriteString(capnp.Int8List{List: l}.String()) + case schema.Type_Which_int16: + enc.w.WriteString(capnp.Int16List{List: l}.String()) + case schema.Type_Which_int32: + 
enc.w.WriteString(capnp.Int32List{List: l}.String()) + case schema.Type_Which_int64: + enc.w.WriteString(capnp.Int64List{List: l}.String()) + case schema.Type_Which_uint8: + enc.w.WriteString(capnp.UInt8List{List: l}.String()) + case schema.Type_Which_uint16: + enc.w.WriteString(capnp.UInt16List{List: l}.String()) + case schema.Type_Which_uint32: + enc.w.WriteString(capnp.UInt32List{List: l}.String()) + case schema.Type_Which_uint64: + enc.w.WriteString(capnp.UInt64List{List: l}.String()) + case schema.Type_Which_float32: + enc.w.WriteString(capnp.Float32List{List: l}.String()) + case schema.Type_Which_float64: + enc.w.WriteString(capnp.Float64List{List: l}.String()) + case schema.Type_Which_data: + enc.w.WriteString(capnp.DataList{List: l}.String()) + case schema.Type_Which_text: + enc.w.WriteString(capnp.TextList{List: l}.String()) + case schema.Type_Which_structType: + enc.w.WriteByte('[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + enc.w.WriteString(", ") + } + err := enc.marshalStruct(elem.StructType().TypeId(), l.Struct(i)) + if err != nil { + return err + } + } + enc.w.WriteByte(']') + case schema.Type_Which_list: + enc.w.WriteByte('[') + ee, err := elem.List().ElementType() + if err != nil { + return err + } + for i := 0; i < l.Len(); i++ { + if i > 0 { + enc.w.WriteString(", ") + } + p, err := capnp.PointerList{List: l}.PtrAt(i) + if err != nil { + return err + } + err = enc.marshalList(ee, p.List()) + if err != nil { + return err + } + } + enc.w.WriteByte(']') + case schema.Type_Which_enum: + enc.w.WriteByte('[') + il := capnp.UInt16List{List: l} + typ := elem.Enum().TypeId() + // TODO(light): only search for node once + for i := 0; i < il.Len(); i++ { + if i > 0 { + enc.w.WriteString(", ") + } + enc.marshalEnum(typ, il.At(i)) + } + enc.w.WriteByte(']') + case schema.Type_Which_interface: + enc.w.WriteByte('[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + enc.w.WriteString(", ") + } + enc.w.WriteString(interfaceMarker) + } + enc.w.WriteByte(']') + case schema.Type_Which_anyPointer: + enc.w.WriteByte('[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + enc.w.WriteString(", ") + } + enc.w.WriteString(anyPointerMarker) + } + enc.w.WriteByte(']') + default: + return fmt.Errorf("unknown list type %v", elem.Which()) + } + return nil +} + +func (enc *Encoder) marshalEnum(typ uint64, val uint16) error { + n, err := enc.nodes.Find(typ) + if err != nil { + return err + } + if n.Which() != schema.Node_Which_enum { + return fmt.Errorf("marshaling enum of type @%#x: type is not an enum", typ) + } + enums, err := n.Enum().Enumerants() + if err != nil { + return err + } + if int(val) >= enums.Len() { + enc.marshalUint(uint64(val)) + return nil + } + name, err := enums.At(int(val)).NameBytes() + if err != nil { + return err + } + enc.w.Write(name) + return nil +} + +type errWriter struct { + w io.Writer + err error +} + +func (ew *errWriter) Write(p []byte) (int, error) { + if ew.err != nil { + return 0, ew.err + } + var n int + n, ew.err = ew.w.Write(p) + return n, ew.err +} + +func (ew *errWriter) WriteString(s string) (int, error) { + if ew.err != nil { + return 0, ew.err + } + var n int + n, ew.err = io.WriteString(ew.w, s) + return n, ew.err +} + +func (ew *errWriter) WriteByte(b byte) error { + if ew.err != nil { + return ew.err + } + if bw, ok := ew.w.(io.ByteWriter); ok { + ew.err = bw.WriteByte(b) + } else { + _, ew.err = ew.w.Write([]byte{b}) + } + return ew.err +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/go.capnp.go 
b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/go.capnp.go new file mode 100644 index 00000000..28f18bab --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/go.capnp.go @@ -0,0 +1,45 @@ +// Code generated by capnpc-go. DO NOT EDIT. + +package capnp + +import ( + schemas "zombiezen.com/go/capnproto2/schemas" +) + +const Package = uint64(0xbea97f1023792be0) +const Import = uint64(0xe130b601260e44b5) +const Doc = uint64(0xc58ad6bd519f935e) +const Tag = uint64(0xa574b41924caefc7) +const Notag = uint64(0xc8768679ec52e012) +const Customtype = uint64(0xfa10659ae02f2093) +const Name = uint64(0xc2b96012172f8df1) +const schema_d12a1c51fedd6c88 = "x\xda\x12\x98\xe2\xc0d\xc8z\x9c\x89\x81!P\x81\x95" + + "\xed\xff\xf1\xf7\xa7T$\xb7\x94,e\x08\xe4e\xe5\xf8" + + "\xdf\x91s\xf7_\xa0\x8c\xd6E\x06\x06FaO\xc6." + + "\xe1@Fv\x06\x86`\x1fFfF\x06\xc6\xff\x0f\xb4" + + "+\x95\x05\xeaW\xee\x03)eDQj\xcb\xb8J\xd8" + + "\x15\xac\xd4\x01\xa2\xf4c\xaf\xbe\xb8P\xc2\xceC\x0c\x17" + + "yY\xff\xf1\xa3\xa85d\x9c$l\x09Vk\x02Q" + + "\x1b7y~\xe0\xdek]GA\xc6\x9a\xa0(Ue" + + "\xec\x12\xd6\x05+\xd5\x80(\x15z\x10\xf4\xa6\xb2\xad\xec" + + "\x04\xa6c%\x19g\x09+\x82\x95\xca@\x94nu\xe1" + + "Sc\xdcf\xf0\x10\xd3\xb1\xbc\x8c\x8b\x84E\xc1J\x05" + + " J'+\xe8?\x98\x95*\xf0\x0b\xa4T\x01E)" + + "#\xe3!aN\xb0R\x16\x90R\x9e\xff\xc5%)\xfa" + + "\xe9\xf9z\xc9\x8c\x89\x05y\x05V%\x89\xe9\x0c\x0c\x01" + + "\x8c\x8c\x8c<\x0cLhR\x05\x89\xc9\xfc\xd9\x89\xe9\xa9" + + "\xd8e\xf3\x12s\x19qH\xa5\xe4'\xe323/\xbf" + + "\x8491=\x80\x91\x91\x81\x19M&3\xb7\x80=\xbf" + + "\xa8\x04]\x1b\x13X2\xb9\xb4\xb8$?\xb7\xa4\xb2 " + + "\x15f. \x00\x00\xff\xff\x89\xff\x94\xdf" + +func init() { + schemas.Register(schema_d12a1c51fedd6c88, + 0xa574b41924caefc7, + 0xbea97f1023792be0, + 0xc2b96012172f8df1, + 0xc58ad6bd519f935e, + 0xc8768679ec52e012, + 0xe130b601260e44b5, + 0xfa10659ae02f2093) +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/nodemap/BUILD.bazel b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/nodemap/BUILD.bazel new file mode 100644 index 00000000..b1c7bc61 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/nodemap/BUILD.bazel @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["nodemap.go"], + visibility = ["//:__subpackages__"], + deps = [ + "//:go_default_library", + "//internal/schema:go_default_library", + "//schemas:go_default_library", + ], +) diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/nodemap/nodemap.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/nodemap/nodemap.go new file mode 100644 index 00000000..bcacffc7 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/nodemap/nodemap.go @@ -0,0 +1,58 @@ +// Package nodemap provides a schema registry index type. +package nodemap + +import ( + "zombiezen.com/go/capnproto2" + "zombiezen.com/go/capnproto2/internal/schema" + "zombiezen.com/go/capnproto2/schemas" +) + +// Map is a lazy index of a registry. +// The zero value is an index of the default registry. 
+type Map struct { + reg *schemas.Registry + nodes map[uint64]schema.Node +} + +func (m *Map) registry() *schemas.Registry { + if m.reg != nil { + return m.reg + } + return &schemas.DefaultRegistry +} + +func (m *Map) UseRegistry(reg *schemas.Registry) { + m.reg = reg + m.nodes = make(map[uint64]schema.Node) +} + +// Find returns the node for the given ID. +func (m *Map) Find(id uint64) (schema.Node, error) { + if n := m.nodes[id]; n.IsValid() { + return n, nil + } + data, err := m.registry().Find(id) + if err != nil { + return schema.Node{}, err + } + msg, err := capnp.Unmarshal(data) + if err != nil { + return schema.Node{}, err + } + req, err := schema.ReadRootCodeGeneratorRequest(msg) + if err != nil { + return schema.Node{}, err + } + nodes, err := req.Nodes() + if err != nil { + return schema.Node{}, err + } + if m.nodes == nil { + m.nodes = make(map[uint64]schema.Node) + } + for i := 0; i < nodes.Len(); i++ { + n := nodes.At(i) + m.nodes[n.Id()] = n + } + return m.nodes[id], nil +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/BUILD.bazel b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/BUILD.bazel new file mode 100644 index 00000000..00374851 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/BUILD.bazel @@ -0,0 +1,18 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "discard.go", + "discard_go14.go", + "packed.go", + ], + visibility = ["//:__subpackages__"], +) + +go_test( + name = "go_default_test", + srcs = ["packed_test.go"], + data = glob(["testdata/**"]), + library = ":go_default_library", +) diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/discard.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/discard.go new file mode 100644 index 00000000..6bf54628 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/discard.go @@ -0,0 +1,11 @@ +// +build go1.5 + +package packed + +import ( + "bufio" +) + +func discard(r *bufio.Reader, n int) { + r.Discard(n) +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/discard_go14.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/discard_go14.go new file mode 100644 index 00000000..a42c391f --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/discard_go14.go @@ -0,0 +1,13 @@ +// +build !go1.5 + +package packed + +import ( + "bufio" + "io" + "io/ioutil" +) + +func discard(r *bufio.Reader, n int) { + io.CopyN(ioutil.Discard, r, int64(n)) +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/fuzz.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/fuzz.go new file mode 100644 index 00000000..8e10bf5b --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/fuzz.go @@ -0,0 +1,65 @@ +// +build gofuzz + +// Fuzz test harness. 
To run: +// go-fuzz-build zombiezen.com/go/capnproto2/internal/packed +// go-fuzz -bin=packed-fuzz.zip -workdir=internal/packed/testdata + +package packed + +import ( + "bufio" + "bytes" + "io" + "io/ioutil" +) + +func Fuzz(data []byte) int { + result := 0 + + // Unpacked + if unpacked, err := Unpack(nil, data); err == nil { + checkRepack(unpacked) + result = 1 + } + + // Read + { + r := NewReader(bufio.NewReader(bytes.NewReader(data))) + if unpacked, err := ioutil.ReadAll(r); err == nil { + checkRepack(unpacked) + result = 1 + } + } + + // ReadWord + { + r := NewReader(bufio.NewReader(bytes.NewReader(data))) + var unpacked []byte + var err error + for { + n := len(unpacked) + unpacked = append(unpacked, 0, 0, 0, 0, 0, 0, 0, 0) + if err = r.ReadWord(unpacked[n:]); err != nil { + unpacked = unpacked[:n] + break + } + } + if err == io.EOF { + checkRepack(unpacked) + result = 1 + } + } + + return result +} + +func checkRepack(unpacked []byte) { + packed := Pack(nil, unpacked) + unpacked2, err := Unpack(nil, packed) + if err != nil { + panic("correctness: unpack, pack, unpack gives error: " + err.Error()) + } + if !bytes.Equal(unpacked, unpacked2) { + panic("correctness: unpack, pack, unpack gives different results") + } +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/packed.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/packed.go new file mode 100644 index 00000000..38573501 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/packed.go @@ -0,0 +1,334 @@ +// Package packed provides functions to read and write the "packed" +// compression scheme described at https://capnproto.org/encoding.html#packing. +package packed + +import ( + "bufio" + "errors" + "io" +) + +const wordSize = 8 + +// Special case tags. +const ( + zeroTag byte = 0x00 + unpackedTag byte = 0xff +) + +// Pack appends the packed version of src to dst and returns the +// resulting slice. len(src) must be a multiple of 8 or Pack panics. +func Pack(dst, src []byte) []byte { + if len(src)%wordSize != 0 { + panic("packed.Pack len(src) must be a multiple of 8") + } + var buf [wordSize]byte + for len(src) > 0 { + var hdr byte + n := 0 + for i := uint(0); i < wordSize; i++ { + if src[i] != 0 { + hdr |= 1 << i + buf[n] = src[i] + n++ + } + } + dst = append(dst, hdr) + dst = append(dst, buf[:n]...) + src = src[wordSize:] + + switch hdr { + case zeroTag: + z := min(numZeroWords(src), 0xff) + dst = append(dst, byte(z)) + src = src[z*wordSize:] + case unpackedTag: + i := 0 + end := min(len(src), 0xff*wordSize) + for i < end { + zeros := 0 + for _, b := range src[i : i+wordSize] { + if b == 0 { + zeros++ + } + } + + if zeros > 1 { + break + } + i += wordSize + } + + rawWords := byte(i / wordSize) + dst = append(dst, rawWords) + dst = append(dst, src[:i]...) + src = src[i:] + } + } + return dst +} + +// numZeroWords returns the number of leading zero words in b. +func numZeroWords(b []byte) int { + for i, bb := range b { + if bb != 0 { + return i / wordSize + } + } + return len(b) / wordSize +} + +// Unpack appends the unpacked version of src to dst and returns the +// resulting slice. 
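+// For example (illustrative, per the packing scheme above), the packed bytes
+// 0x51 0x08 0x03 0x02 expand to the single word
+// 0x08 0x00 0x00 0x00 0x03 0x00 0x02 0x00: tag 0x51 marks bytes 0, 4, and 6
+// of the word as nonzero.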
+func Unpack(dst, src []byte) ([]byte, error) { + for len(src) > 0 { + tag := src[0] + src = src[1:] + + pstart := len(dst) + dst = allocWords(dst, 1) + p := dst[pstart : pstart+wordSize] + if len(src) >= wordSize { + i := 0 + nz := tag & 1 + p[0] = src[i] & -nz + i += int(nz) + nz = tag >> 1 & 1 + p[1] = src[i] & -nz + i += int(nz) + nz = tag >> 2 & 1 + p[2] = src[i] & -nz + i += int(nz) + nz = tag >> 3 & 1 + p[3] = src[i] & -nz + i += int(nz) + nz = tag >> 4 & 1 + p[4] = src[i] & -nz + i += int(nz) + nz = tag >> 5 & 1 + p[5] = src[i] & -nz + i += int(nz) + nz = tag >> 6 & 1 + p[6] = src[i] & -nz + i += int(nz) + nz = tag >> 7 & 1 + p[7] = src[i] & -nz + i += int(nz) + src = src[i:] + } else { + for i := uint(0); i < wordSize; i++ { + if tag&(1<= target { + pp := p[len(p):target] + for i := range pp { + pp[i] = 0 + } + return p[:target] + } + newcap := cap(p) + doublecap := newcap + newcap + if target > doublecap { + newcap = target + } else { + if len(p) < 1024 { + newcap = doublecap + } else { + for newcap < target { + newcap += newcap / 4 + } + } + } + pp := make([]byte, target, newcap) + copy(pp, p) + return pp +} + +// A Reader decompresses a packed byte stream. +type Reader struct { + // ReadWord state + rd *bufio.Reader + err error + zeroes int + literal int + + // Read state + word [wordSize]byte + wordIdx int +} + +// NewReader returns a reader that decompresses a packed stream from r. +func NewReader(r *bufio.Reader) *Reader { + return &Reader{rd: r, wordIdx: wordSize} +} + +func min(a, b int) int { + if b < a { + return b + } + return a +} + +// ReadWord decompresses the next word from the underlying stream. +func (r *Reader) ReadWord(p []byte) error { + if len(p) < wordSize { + return errors.New("packed: read word buffer too small") + } + r.wordIdx = wordSize // if the caller tries to call ReadWord and Read, don't give them partial words. + if r.err != nil { + err := r.err + r.err = nil + return err + } + p = p[:wordSize] + switch { + case r.zeroes > 0: + r.zeroes-- + for i := range p { + p[i] = 0 + } + return nil + case r.literal > 0: + r.literal-- + _, err := io.ReadFull(r.rd, p) + return err + } + + var tag byte + if r.rd.Buffered() < wordSize+1 { + var err error + tag, err = r.rd.ReadByte() + if err != nil { + return err + } + for i := range p { + p[i] = 0 + } + for i := uint(0); i < wordSize; i++ { + if tag&(1<> 1 & 1 + p[1] = b[i] & -nz + i += int(nz) + nz = tag >> 2 & 1 + p[2] = b[i] & -nz + i += int(nz) + nz = tag >> 3 & 1 + p[3] = b[i] & -nz + i += int(nz) + nz = tag >> 4 & 1 + p[4] = b[i] & -nz + i += int(nz) + nz = tag >> 5 & 1 + p[5] = b[i] & -nz + i += int(nz) + nz = tag >> 6 & 1 + p[6] = b[i] & -nz + i += int(nz) + nz = tag >> 7 & 1 + p[7] = b[i] & -nz + i += int(nz) + discard(r.rd, i) + } + switch tag { + case zeroTag: + z, err := r.rd.ReadByte() + if err == io.EOF { + r.err = io.ErrUnexpectedEOF + return nil + } else if err != nil { + r.err = err + return nil + } + r.zeroes = int(z) + case unpackedTag: + l, err := r.rd.ReadByte() + if err == io.EOF { + r.err = io.ErrUnexpectedEOF + return nil + } else if err != nil { + r.err = err + return nil + } + r.literal = int(l) + } + return nil +} + +// Read reads up to len(p) bytes into p. This will decompress whole +// words at a time, so mixing calls to Read and ReadWord may lead to +// bytes missing. 
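+// A typical use (illustrative, mirroring the fuzz harness above) wraps a
+// packed stream and reads it as if it were unpacked:
+//
+//	r := NewReader(bufio.NewReader(bytes.NewReader(data)))
+//	unpacked, err := ioutil.ReadAll(r)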
+func (r *Reader) Read(p []byte) (n int, err error) { + if r.wordIdx < wordSize { + n = copy(p, r.word[r.wordIdx:]) + r.wordIdx += n + } + for n < len(p) { + if r.rd.Buffered() < wordSize+1 && n > 0 { + return n, nil + } + if len(p)-n >= wordSize { + err := r.ReadWord(p[n:]) + if err != nil { + return n, err + } + n += wordSize + } else { + err := r.ReadWord(r.word[:]) + if err != nil { + return n, err + } + r.wordIdx = copy(p[n:], r.word[:]) + n += r.wordIdx + } + } + return n, nil +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/schema/BUILD.bazel b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/schema/BUILD.bazel new file mode 100644 index 00000000..d1c1d0df --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/schema/BUILD.bazel @@ -0,0 +1,8 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["schema.capnp.go"], + visibility = ["//:__subpackages__"], + deps = ["//:go_default_library"], +) diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/schema/schema.capnp.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/schema/schema.capnp.go new file mode 100644 index 00000000..d2937278 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/schema/schema.capnp.go @@ -0,0 +1,3071 @@ +// Code generated by capnpc-go. DO NOT EDIT. + +package schema + +import ( + math "math" + strconv "strconv" + capnp "zombiezen.com/go/capnproto2" +) + +// Constants defined in schema.capnp. +const ( + Field_noDiscriminant = uint16(65535) +) + +type Node struct{ capnp.Struct } +type Node_structNode Node +type Node_enum Node +type Node_interface Node +type Node_const Node +type Node_annotation Node +type Node_Which uint16 + +const ( + Node_Which_file Node_Which = 0 + Node_Which_structNode Node_Which = 1 + Node_Which_enum Node_Which = 2 + Node_Which_interface Node_Which = 3 + Node_Which_const Node_Which = 4 + Node_Which_annotation Node_Which = 5 +) + +func (w Node_Which) String() string { + const s = "filestructNodeenuminterfaceconstannotation" + switch w { + case Node_Which_file: + return s[0:4] + case Node_Which_structNode: + return s[4:14] + case Node_Which_enum: + return s[14:18] + case Node_Which_interface: + return s[18:27] + case Node_Which_const: + return s[27:32] + case Node_Which_annotation: + return s[32:42] + + } + return "Node_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +// Node_TypeID is the unique identifier for the type Node. 
+const Node_TypeID = 0xe682ab4cf923a417 + +func NewNode(s *capnp.Segment) (Node, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 40, PointerCount: 6}) + return Node{st}, err +} + +func NewRootNode(s *capnp.Segment) (Node, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 40, PointerCount: 6}) + return Node{st}, err +} + +func ReadRootNode(msg *capnp.Message) (Node, error) { + root, err := msg.RootPtr() + return Node{root.Struct()}, err +} + +func (s Node) Which() Node_Which { + return Node_Which(s.Struct.Uint16(12)) +} +func (s Node) Id() uint64 { + return s.Struct.Uint64(0) +} + +func (s Node) SetId(v uint64) { + s.Struct.SetUint64(0, v) +} + +func (s Node) DisplayName() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s Node) HasDisplayName() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Node) DisplayNameBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s Node) SetDisplayName(v string) error { + return s.Struct.SetText(0, v) +} + +func (s Node) DisplayNamePrefixLength() uint32 { + return s.Struct.Uint32(8) +} + +func (s Node) SetDisplayNamePrefixLength(v uint32) { + s.Struct.SetUint32(8, v) +} + +func (s Node) ScopeId() uint64 { + return s.Struct.Uint64(16) +} + +func (s Node) SetScopeId(v uint64) { + s.Struct.SetUint64(16, v) +} + +func (s Node) Parameters() (Node_Parameter_List, error) { + p, err := s.Struct.Ptr(5) + return Node_Parameter_List{List: p.List()}, err +} + +func (s Node) HasParameters() bool { + p, err := s.Struct.Ptr(5) + return p.IsValid() || err != nil +} + +func (s Node) SetParameters(v Node_Parameter_List) error { + return s.Struct.SetPtr(5, v.List.ToPtr()) +} + +// NewParameters sets the parameters field to a newly +// allocated Node_Parameter_List, preferring placement in s's segment. +func (s Node) NewParameters(n int32) (Node_Parameter_List, error) { + l, err := NewNode_Parameter_List(s.Struct.Segment(), n) + if err != nil { + return Node_Parameter_List{}, err + } + err = s.Struct.SetPtr(5, l.List.ToPtr()) + return l, err +} + +func (s Node) IsGeneric() bool { + return s.Struct.Bit(288) +} + +func (s Node) SetIsGeneric(v bool) { + s.Struct.SetBit(288, v) +} + +func (s Node) NestedNodes() (Node_NestedNode_List, error) { + p, err := s.Struct.Ptr(1) + return Node_NestedNode_List{List: p.List()}, err +} + +func (s Node) HasNestedNodes() bool { + p, err := s.Struct.Ptr(1) + return p.IsValid() || err != nil +} + +func (s Node) SetNestedNodes(v Node_NestedNode_List) error { + return s.Struct.SetPtr(1, v.List.ToPtr()) +} + +// NewNestedNodes sets the nestedNodes field to a newly +// allocated Node_NestedNode_List, preferring placement in s's segment. +func (s Node) NewNestedNodes(n int32) (Node_NestedNode_List, error) { + l, err := NewNode_NestedNode_List(s.Struct.Segment(), n) + if err != nil { + return Node_NestedNode_List{}, err + } + err = s.Struct.SetPtr(1, l.List.ToPtr()) + return l, err +} + +func (s Node) Annotations() (Annotation_List, error) { + p, err := s.Struct.Ptr(2) + return Annotation_List{List: p.List()}, err +} + +func (s Node) HasAnnotations() bool { + p, err := s.Struct.Ptr(2) + return p.IsValid() || err != nil +} + +func (s Node) SetAnnotations(v Annotation_List) error { + return s.Struct.SetPtr(2, v.List.ToPtr()) +} + +// NewAnnotations sets the annotations field to a newly +// allocated Annotation_List, preferring placement in s's segment. 
+func (s Node) NewAnnotations(n int32) (Annotation_List, error) { + l, err := NewAnnotation_List(s.Struct.Segment(), n) + if err != nil { + return Annotation_List{}, err + } + err = s.Struct.SetPtr(2, l.List.ToPtr()) + return l, err +} + +func (s Node) SetFile() { + s.Struct.SetUint16(12, 0) + +} + +func (s Node) StructNode() Node_structNode { return Node_structNode(s) } + +func (s Node) SetStructNode() { + s.Struct.SetUint16(12, 1) +} + +func (s Node_structNode) DataWordCount() uint16 { + return s.Struct.Uint16(14) +} + +func (s Node_structNode) SetDataWordCount(v uint16) { + s.Struct.SetUint16(14, v) +} + +func (s Node_structNode) PointerCount() uint16 { + return s.Struct.Uint16(24) +} + +func (s Node_structNode) SetPointerCount(v uint16) { + s.Struct.SetUint16(24, v) +} + +func (s Node_structNode) PreferredListEncoding() ElementSize { + return ElementSize(s.Struct.Uint16(26)) +} + +func (s Node_structNode) SetPreferredListEncoding(v ElementSize) { + s.Struct.SetUint16(26, uint16(v)) +} + +func (s Node_structNode) IsGroup() bool { + return s.Struct.Bit(224) +} + +func (s Node_structNode) SetIsGroup(v bool) { + s.Struct.SetBit(224, v) +} + +func (s Node_structNode) DiscriminantCount() uint16 { + return s.Struct.Uint16(30) +} + +func (s Node_structNode) SetDiscriminantCount(v uint16) { + s.Struct.SetUint16(30, v) +} + +func (s Node_structNode) DiscriminantOffset() uint32 { + return s.Struct.Uint32(32) +} + +func (s Node_structNode) SetDiscriminantOffset(v uint32) { + s.Struct.SetUint32(32, v) +} + +func (s Node_structNode) Fields() (Field_List, error) { + p, err := s.Struct.Ptr(3) + return Field_List{List: p.List()}, err +} + +func (s Node_structNode) HasFields() bool { + p, err := s.Struct.Ptr(3) + return p.IsValid() || err != nil +} + +func (s Node_structNode) SetFields(v Field_List) error { + return s.Struct.SetPtr(3, v.List.ToPtr()) +} + +// NewFields sets the fields field to a newly +// allocated Field_List, preferring placement in s's segment. +func (s Node_structNode) NewFields(n int32) (Field_List, error) { + l, err := NewField_List(s.Struct.Segment(), n) + if err != nil { + return Field_List{}, err + } + err = s.Struct.SetPtr(3, l.List.ToPtr()) + return l, err +} + +func (s Node) Enum() Node_enum { return Node_enum(s) } + +func (s Node) SetEnum() { + s.Struct.SetUint16(12, 2) +} + +func (s Node_enum) Enumerants() (Enumerant_List, error) { + p, err := s.Struct.Ptr(3) + return Enumerant_List{List: p.List()}, err +} + +func (s Node_enum) HasEnumerants() bool { + p, err := s.Struct.Ptr(3) + return p.IsValid() || err != nil +} + +func (s Node_enum) SetEnumerants(v Enumerant_List) error { + return s.Struct.SetPtr(3, v.List.ToPtr()) +} + +// NewEnumerants sets the enumerants field to a newly +// allocated Enumerant_List, preferring placement in s's segment. 
+func (s Node_enum) NewEnumerants(n int32) (Enumerant_List, error) { + l, err := NewEnumerant_List(s.Struct.Segment(), n) + if err != nil { + return Enumerant_List{}, err + } + err = s.Struct.SetPtr(3, l.List.ToPtr()) + return l, err +} + +func (s Node) Interface() Node_interface { return Node_interface(s) } + +func (s Node) SetInterface() { + s.Struct.SetUint16(12, 3) +} + +func (s Node_interface) Methods() (Method_List, error) { + p, err := s.Struct.Ptr(3) + return Method_List{List: p.List()}, err +} + +func (s Node_interface) HasMethods() bool { + p, err := s.Struct.Ptr(3) + return p.IsValid() || err != nil +} + +func (s Node_interface) SetMethods(v Method_List) error { + return s.Struct.SetPtr(3, v.List.ToPtr()) +} + +// NewMethods sets the methods field to a newly +// allocated Method_List, preferring placement in s's segment. +func (s Node_interface) NewMethods(n int32) (Method_List, error) { + l, err := NewMethod_List(s.Struct.Segment(), n) + if err != nil { + return Method_List{}, err + } + err = s.Struct.SetPtr(3, l.List.ToPtr()) + return l, err +} + +func (s Node_interface) Superclasses() (Superclass_List, error) { + p, err := s.Struct.Ptr(4) + return Superclass_List{List: p.List()}, err +} + +func (s Node_interface) HasSuperclasses() bool { + p, err := s.Struct.Ptr(4) + return p.IsValid() || err != nil +} + +func (s Node_interface) SetSuperclasses(v Superclass_List) error { + return s.Struct.SetPtr(4, v.List.ToPtr()) +} + +// NewSuperclasses sets the superclasses field to a newly +// allocated Superclass_List, preferring placement in s's segment. +func (s Node_interface) NewSuperclasses(n int32) (Superclass_List, error) { + l, err := NewSuperclass_List(s.Struct.Segment(), n) + if err != nil { + return Superclass_List{}, err + } + err = s.Struct.SetPtr(4, l.List.ToPtr()) + return l, err +} + +func (s Node) Const() Node_const { return Node_const(s) } + +func (s Node) SetConst() { + s.Struct.SetUint16(12, 4) +} + +func (s Node_const) Type() (Type, error) { + p, err := s.Struct.Ptr(3) + return Type{Struct: p.Struct()}, err +} + +func (s Node_const) HasType() bool { + p, err := s.Struct.Ptr(3) + return p.IsValid() || err != nil +} + +func (s Node_const) SetType(v Type) error { + return s.Struct.SetPtr(3, v.Struct.ToPtr()) +} + +// NewType sets the type field to a newly +// allocated Type struct, preferring placement in s's segment. +func (s Node_const) NewType() (Type, error) { + ss, err := NewType(s.Struct.Segment()) + if err != nil { + return Type{}, err + } + err = s.Struct.SetPtr(3, ss.Struct.ToPtr()) + return ss, err +} + +func (s Node_const) Value() (Value, error) { + p, err := s.Struct.Ptr(4) + return Value{Struct: p.Struct()}, err +} + +func (s Node_const) HasValue() bool { + p, err := s.Struct.Ptr(4) + return p.IsValid() || err != nil +} + +func (s Node_const) SetValue(v Value) error { + return s.Struct.SetPtr(4, v.Struct.ToPtr()) +} + +// NewValue sets the value field to a newly +// allocated Value struct, preferring placement in s's segment. 
+func (s Node_const) NewValue() (Value, error) { + ss, err := NewValue(s.Struct.Segment()) + if err != nil { + return Value{}, err + } + err = s.Struct.SetPtr(4, ss.Struct.ToPtr()) + return ss, err +} + +func (s Node) Annotation() Node_annotation { return Node_annotation(s) } + +func (s Node) SetAnnotation() { + s.Struct.SetUint16(12, 5) +} + +func (s Node_annotation) Type() (Type, error) { + p, err := s.Struct.Ptr(3) + return Type{Struct: p.Struct()}, err +} + +func (s Node_annotation) HasType() bool { + p, err := s.Struct.Ptr(3) + return p.IsValid() || err != nil +} + +func (s Node_annotation) SetType(v Type) error { + return s.Struct.SetPtr(3, v.Struct.ToPtr()) +} + +// NewType sets the type field to a newly +// allocated Type struct, preferring placement in s's segment. +func (s Node_annotation) NewType() (Type, error) { + ss, err := NewType(s.Struct.Segment()) + if err != nil { + return Type{}, err + } + err = s.Struct.SetPtr(3, ss.Struct.ToPtr()) + return ss, err +} + +func (s Node_annotation) TargetsFile() bool { + return s.Struct.Bit(112) +} + +func (s Node_annotation) SetTargetsFile(v bool) { + s.Struct.SetBit(112, v) +} + +func (s Node_annotation) TargetsConst() bool { + return s.Struct.Bit(113) +} + +func (s Node_annotation) SetTargetsConst(v bool) { + s.Struct.SetBit(113, v) +} + +func (s Node_annotation) TargetsEnum() bool { + return s.Struct.Bit(114) +} + +func (s Node_annotation) SetTargetsEnum(v bool) { + s.Struct.SetBit(114, v) +} + +func (s Node_annotation) TargetsEnumerant() bool { + return s.Struct.Bit(115) +} + +func (s Node_annotation) SetTargetsEnumerant(v bool) { + s.Struct.SetBit(115, v) +} + +func (s Node_annotation) TargetsStruct() bool { + return s.Struct.Bit(116) +} + +func (s Node_annotation) SetTargetsStruct(v bool) { + s.Struct.SetBit(116, v) +} + +func (s Node_annotation) TargetsField() bool { + return s.Struct.Bit(117) +} + +func (s Node_annotation) SetTargetsField(v bool) { + s.Struct.SetBit(117, v) +} + +func (s Node_annotation) TargetsUnion() bool { + return s.Struct.Bit(118) +} + +func (s Node_annotation) SetTargetsUnion(v bool) { + s.Struct.SetBit(118, v) +} + +func (s Node_annotation) TargetsGroup() bool { + return s.Struct.Bit(119) +} + +func (s Node_annotation) SetTargetsGroup(v bool) { + s.Struct.SetBit(119, v) +} + +func (s Node_annotation) TargetsInterface() bool { + return s.Struct.Bit(120) +} + +func (s Node_annotation) SetTargetsInterface(v bool) { + s.Struct.SetBit(120, v) +} + +func (s Node_annotation) TargetsMethod() bool { + return s.Struct.Bit(121) +} + +func (s Node_annotation) SetTargetsMethod(v bool) { + s.Struct.SetBit(121, v) +} + +func (s Node_annotation) TargetsParam() bool { + return s.Struct.Bit(122) +} + +func (s Node_annotation) SetTargetsParam(v bool) { + s.Struct.SetBit(122, v) +} + +func (s Node_annotation) TargetsAnnotation() bool { + return s.Struct.Bit(123) +} + +func (s Node_annotation) SetTargetsAnnotation(v bool) { + s.Struct.SetBit(123, v) +} + +// Node_List is a list of Node. +type Node_List struct{ capnp.List } + +// NewNode creates a new list of Node. +func NewNode_List(s *capnp.Segment, sz int32) (Node_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 40, PointerCount: 6}, sz) + return Node_List{l}, err +} + +func (s Node_List) At(i int) Node { return Node{s.List.Struct(i)} } + +func (s Node_List) Set(i int, v Node) error { return s.List.SetStruct(i, v.Struct) } + +// Node_Promise is a wrapper for a Node promised by a client call. 
+type Node_Promise struct{ *capnp.Pipeline } + +func (p Node_Promise) Struct() (Node, error) { + s, err := p.Pipeline.Struct() + return Node{s}, err +} + +func (p Node_Promise) StructNode() Node_structNode_Promise { return Node_structNode_Promise{p.Pipeline} } + +// Node_structNode_Promise is a wrapper for a Node_structNode promised by a client call. +type Node_structNode_Promise struct{ *capnp.Pipeline } + +func (p Node_structNode_Promise) Struct() (Node_structNode, error) { + s, err := p.Pipeline.Struct() + return Node_structNode{s}, err +} + +func (p Node_Promise) Enum() Node_enum_Promise { return Node_enum_Promise{p.Pipeline} } + +// Node_enum_Promise is a wrapper for a Node_enum promised by a client call. +type Node_enum_Promise struct{ *capnp.Pipeline } + +func (p Node_enum_Promise) Struct() (Node_enum, error) { + s, err := p.Pipeline.Struct() + return Node_enum{s}, err +} + +func (p Node_Promise) Interface() Node_interface_Promise { return Node_interface_Promise{p.Pipeline} } + +// Node_interface_Promise is a wrapper for a Node_interface promised by a client call. +type Node_interface_Promise struct{ *capnp.Pipeline } + +func (p Node_interface_Promise) Struct() (Node_interface, error) { + s, err := p.Pipeline.Struct() + return Node_interface{s}, err +} + +func (p Node_Promise) Const() Node_const_Promise { return Node_const_Promise{p.Pipeline} } + +// Node_const_Promise is a wrapper for a Node_const promised by a client call. +type Node_const_Promise struct{ *capnp.Pipeline } + +func (p Node_const_Promise) Struct() (Node_const, error) { + s, err := p.Pipeline.Struct() + return Node_const{s}, err +} + +func (p Node_const_Promise) Type() Type_Promise { + return Type_Promise{Pipeline: p.Pipeline.GetPipeline(3)} +} + +func (p Node_const_Promise) Value() Value_Promise { + return Value_Promise{Pipeline: p.Pipeline.GetPipeline(4)} +} + +func (p Node_Promise) Annotation() Node_annotation_Promise { return Node_annotation_Promise{p.Pipeline} } + +// Node_annotation_Promise is a wrapper for a Node_annotation promised by a client call. +type Node_annotation_Promise struct{ *capnp.Pipeline } + +func (p Node_annotation_Promise) Struct() (Node_annotation, error) { + s, err := p.Pipeline.Struct() + return Node_annotation{s}, err +} + +func (p Node_annotation_Promise) Type() Type_Promise { + return Type_Promise{Pipeline: p.Pipeline.GetPipeline(3)} +} + +type Node_Parameter struct{ capnp.Struct } + +// Node_Parameter_TypeID is the unique identifier for the type Node_Parameter. 
+const Node_Parameter_TypeID = 0xb9521bccf10fa3b1 + +func NewNode_Parameter(s *capnp.Segment) (Node_Parameter, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Node_Parameter{st}, err +} + +func NewRootNode_Parameter(s *capnp.Segment) (Node_Parameter, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Node_Parameter{st}, err +} + +func ReadRootNode_Parameter(msg *capnp.Message) (Node_Parameter, error) { + root, err := msg.RootPtr() + return Node_Parameter{root.Struct()}, err +} + +func (s Node_Parameter) Name() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s Node_Parameter) HasName() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Node_Parameter) NameBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s Node_Parameter) SetName(v string) error { + return s.Struct.SetText(0, v) +} + +// Node_Parameter_List is a list of Node_Parameter. +type Node_Parameter_List struct{ capnp.List } + +// NewNode_Parameter creates a new list of Node_Parameter. +func NewNode_Parameter_List(s *capnp.Segment, sz int32) (Node_Parameter_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return Node_Parameter_List{l}, err +} + +func (s Node_Parameter_List) At(i int) Node_Parameter { return Node_Parameter{s.List.Struct(i)} } + +func (s Node_Parameter_List) Set(i int, v Node_Parameter) error { return s.List.SetStruct(i, v.Struct) } + +// Node_Parameter_Promise is a wrapper for a Node_Parameter promised by a client call. +type Node_Parameter_Promise struct{ *capnp.Pipeline } + +func (p Node_Parameter_Promise) Struct() (Node_Parameter, error) { + s, err := p.Pipeline.Struct() + return Node_Parameter{s}, err +} + +type Node_NestedNode struct{ capnp.Struct } + +// Node_NestedNode_TypeID is the unique identifier for the type Node_NestedNode. +const Node_NestedNode_TypeID = 0xdebf55bbfa0fc242 + +func NewNode_NestedNode(s *capnp.Segment) (Node_NestedNode, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}) + return Node_NestedNode{st}, err +} + +func NewRootNode_NestedNode(s *capnp.Segment) (Node_NestedNode, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}) + return Node_NestedNode{st}, err +} + +func ReadRootNode_NestedNode(msg *capnp.Message) (Node_NestedNode, error) { + root, err := msg.RootPtr() + return Node_NestedNode{root.Struct()}, err +} + +func (s Node_NestedNode) Name() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s Node_NestedNode) HasName() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Node_NestedNode) NameBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s Node_NestedNode) SetName(v string) error { + return s.Struct.SetText(0, v) +} + +func (s Node_NestedNode) Id() uint64 { + return s.Struct.Uint64(0) +} + +func (s Node_NestedNode) SetId(v uint64) { + s.Struct.SetUint64(0, v) +} + +// Node_NestedNode_List is a list of Node_NestedNode. +type Node_NestedNode_List struct{ capnp.List } + +// NewNode_NestedNode creates a new list of Node_NestedNode. 
+func NewNode_NestedNode_List(s *capnp.Segment, sz int32) (Node_NestedNode_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}, sz) + return Node_NestedNode_List{l}, err +} + +func (s Node_NestedNode_List) At(i int) Node_NestedNode { return Node_NestedNode{s.List.Struct(i)} } + +func (s Node_NestedNode_List) Set(i int, v Node_NestedNode) error { + return s.List.SetStruct(i, v.Struct) +} + +// Node_NestedNode_Promise is a wrapper for a Node_NestedNode promised by a client call. +type Node_NestedNode_Promise struct{ *capnp.Pipeline } + +func (p Node_NestedNode_Promise) Struct() (Node_NestedNode, error) { + s, err := p.Pipeline.Struct() + return Node_NestedNode{s}, err +} + +type Field struct{ capnp.Struct } +type Field_slot Field +type Field_group Field +type Field_ordinal Field +type Field_Which uint16 + +const ( + Field_Which_slot Field_Which = 0 + Field_Which_group Field_Which = 1 +) + +func (w Field_Which) String() string { + const s = "slotgroup" + switch w { + case Field_Which_slot: + return s[0:4] + case Field_Which_group: + return s[4:9] + + } + return "Field_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +type Field_ordinal_Which uint16 + +const ( + Field_ordinal_Which_implicit Field_ordinal_Which = 0 + Field_ordinal_Which_explicit Field_ordinal_Which = 1 +) + +func (w Field_ordinal_Which) String() string { + const s = "implicitexplicit" + switch w { + case Field_ordinal_Which_implicit: + return s[0:8] + case Field_ordinal_Which_explicit: + return s[8:16] + + } + return "Field_ordinal_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +// Field_TypeID is the unique identifier for the type Field. +const Field_TypeID = 0x9aad50a41f4af45f + +func NewField(s *capnp.Segment) (Field, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 24, PointerCount: 4}) + return Field{st}, err +} + +func NewRootField(s *capnp.Segment) (Field, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 24, PointerCount: 4}) + return Field{st}, err +} + +func ReadRootField(msg *capnp.Message) (Field, error) { + root, err := msg.RootPtr() + return Field{root.Struct()}, err +} + +func (s Field) Which() Field_Which { + return Field_Which(s.Struct.Uint16(8)) +} +func (s Field) Name() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s Field) HasName() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Field) NameBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s Field) SetName(v string) error { + return s.Struct.SetText(0, v) +} + +func (s Field) CodeOrder() uint16 { + return s.Struct.Uint16(0) +} + +func (s Field) SetCodeOrder(v uint16) { + s.Struct.SetUint16(0, v) +} + +func (s Field) Annotations() (Annotation_List, error) { + p, err := s.Struct.Ptr(1) + return Annotation_List{List: p.List()}, err +} + +func (s Field) HasAnnotations() bool { + p, err := s.Struct.Ptr(1) + return p.IsValid() || err != nil +} + +func (s Field) SetAnnotations(v Annotation_List) error { + return s.Struct.SetPtr(1, v.List.ToPtr()) +} + +// NewAnnotations sets the annotations field to a newly +// allocated Annotation_List, preferring placement in s's segment. 
+func (s Field) NewAnnotations(n int32) (Annotation_List, error) { + l, err := NewAnnotation_List(s.Struct.Segment(), n) + if err != nil { + return Annotation_List{}, err + } + err = s.Struct.SetPtr(1, l.List.ToPtr()) + return l, err +} + +func (s Field) DiscriminantValue() uint16 { + return s.Struct.Uint16(2) ^ 65535 +} + +func (s Field) SetDiscriminantValue(v uint16) { + s.Struct.SetUint16(2, v^65535) +} + +func (s Field) Slot() Field_slot { return Field_slot(s) } + +func (s Field) SetSlot() { + s.Struct.SetUint16(8, 0) +} + +func (s Field_slot) Offset() uint32 { + return s.Struct.Uint32(4) +} + +func (s Field_slot) SetOffset(v uint32) { + s.Struct.SetUint32(4, v) +} + +func (s Field_slot) Type() (Type, error) { + p, err := s.Struct.Ptr(2) + return Type{Struct: p.Struct()}, err +} + +func (s Field_slot) HasType() bool { + p, err := s.Struct.Ptr(2) + return p.IsValid() || err != nil +} + +func (s Field_slot) SetType(v Type) error { + return s.Struct.SetPtr(2, v.Struct.ToPtr()) +} + +// NewType sets the type field to a newly +// allocated Type struct, preferring placement in s's segment. +func (s Field_slot) NewType() (Type, error) { + ss, err := NewType(s.Struct.Segment()) + if err != nil { + return Type{}, err + } + err = s.Struct.SetPtr(2, ss.Struct.ToPtr()) + return ss, err +} + +func (s Field_slot) DefaultValue() (Value, error) { + p, err := s.Struct.Ptr(3) + return Value{Struct: p.Struct()}, err +} + +func (s Field_slot) HasDefaultValue() bool { + p, err := s.Struct.Ptr(3) + return p.IsValid() || err != nil +} + +func (s Field_slot) SetDefaultValue(v Value) error { + return s.Struct.SetPtr(3, v.Struct.ToPtr()) +} + +// NewDefaultValue sets the defaultValue field to a newly +// allocated Value struct, preferring placement in s's segment. +func (s Field_slot) NewDefaultValue() (Value, error) { + ss, err := NewValue(s.Struct.Segment()) + if err != nil { + return Value{}, err + } + err = s.Struct.SetPtr(3, ss.Struct.ToPtr()) + return ss, err +} + +func (s Field_slot) HadExplicitDefault() bool { + return s.Struct.Bit(128) +} + +func (s Field_slot) SetHadExplicitDefault(v bool) { + s.Struct.SetBit(128, v) +} + +func (s Field) Group() Field_group { return Field_group(s) } + +func (s Field) SetGroup() { + s.Struct.SetUint16(8, 1) +} + +func (s Field_group) TypeId() uint64 { + return s.Struct.Uint64(16) +} + +func (s Field_group) SetTypeId(v uint64) { + s.Struct.SetUint64(16, v) +} + +func (s Field) Ordinal() Field_ordinal { return Field_ordinal(s) } + +func (s Field_ordinal) Which() Field_ordinal_Which { + return Field_ordinal_Which(s.Struct.Uint16(10)) +} +func (s Field_ordinal) SetImplicit() { + s.Struct.SetUint16(10, 0) + +} + +func (s Field_ordinal) Explicit() uint16 { + return s.Struct.Uint16(12) +} + +func (s Field_ordinal) SetExplicit(v uint16) { + s.Struct.SetUint16(10, 1) + s.Struct.SetUint16(12, v) +} + +// Field_List is a list of Field. +type Field_List struct{ capnp.List } + +// NewField creates a new list of Field. +func NewField_List(s *capnp.Segment, sz int32) (Field_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 24, PointerCount: 4}, sz) + return Field_List{l}, err +} + +func (s Field_List) At(i int) Field { return Field{s.List.Struct(i)} } + +func (s Field_List) Set(i int, v Field) error { return s.List.SetStruct(i, v.Struct) } + +// Field_Promise is a wrapper for a Field promised by a client call. 
+type Field_Promise struct{ *capnp.Pipeline } + +func (p Field_Promise) Struct() (Field, error) { + s, err := p.Pipeline.Struct() + return Field{s}, err +} + +func (p Field_Promise) Slot() Field_slot_Promise { return Field_slot_Promise{p.Pipeline} } + +// Field_slot_Promise is a wrapper for a Field_slot promised by a client call. +type Field_slot_Promise struct{ *capnp.Pipeline } + +func (p Field_slot_Promise) Struct() (Field_slot, error) { + s, err := p.Pipeline.Struct() + return Field_slot{s}, err +} + +func (p Field_slot_Promise) Type() Type_Promise { + return Type_Promise{Pipeline: p.Pipeline.GetPipeline(2)} +} + +func (p Field_slot_Promise) DefaultValue() Value_Promise { + return Value_Promise{Pipeline: p.Pipeline.GetPipeline(3)} +} + +func (p Field_Promise) Group() Field_group_Promise { return Field_group_Promise{p.Pipeline} } + +// Field_group_Promise is a wrapper for a Field_group promised by a client call. +type Field_group_Promise struct{ *capnp.Pipeline } + +func (p Field_group_Promise) Struct() (Field_group, error) { + s, err := p.Pipeline.Struct() + return Field_group{s}, err +} + +func (p Field_Promise) Ordinal() Field_ordinal_Promise { return Field_ordinal_Promise{p.Pipeline} } + +// Field_ordinal_Promise is a wrapper for a Field_ordinal promised by a client call. +type Field_ordinal_Promise struct{ *capnp.Pipeline } + +func (p Field_ordinal_Promise) Struct() (Field_ordinal, error) { + s, err := p.Pipeline.Struct() + return Field_ordinal{s}, err +} + +type Enumerant struct{ capnp.Struct } + +// Enumerant_TypeID is the unique identifier for the type Enumerant. +const Enumerant_TypeID = 0x978a7cebdc549a4d + +func NewEnumerant(s *capnp.Segment) (Enumerant, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}) + return Enumerant{st}, err +} + +func NewRootEnumerant(s *capnp.Segment) (Enumerant, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}) + return Enumerant{st}, err +} + +func ReadRootEnumerant(msg *capnp.Message) (Enumerant, error) { + root, err := msg.RootPtr() + return Enumerant{root.Struct()}, err +} + +func (s Enumerant) Name() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s Enumerant) HasName() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Enumerant) NameBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s Enumerant) SetName(v string) error { + return s.Struct.SetText(0, v) +} + +func (s Enumerant) CodeOrder() uint16 { + return s.Struct.Uint16(0) +} + +func (s Enumerant) SetCodeOrder(v uint16) { + s.Struct.SetUint16(0, v) +} + +func (s Enumerant) Annotations() (Annotation_List, error) { + p, err := s.Struct.Ptr(1) + return Annotation_List{List: p.List()}, err +} + +func (s Enumerant) HasAnnotations() bool { + p, err := s.Struct.Ptr(1) + return p.IsValid() || err != nil +} + +func (s Enumerant) SetAnnotations(v Annotation_List) error { + return s.Struct.SetPtr(1, v.List.ToPtr()) +} + +// NewAnnotations sets the annotations field to a newly +// allocated Annotation_List, preferring placement in s's segment. +func (s Enumerant) NewAnnotations(n int32) (Annotation_List, error) { + l, err := NewAnnotation_List(s.Struct.Segment(), n) + if err != nil { + return Annotation_List{}, err + } + err = s.Struct.SetPtr(1, l.List.ToPtr()) + return l, err +} + +// Enumerant_List is a list of Enumerant. +type Enumerant_List struct{ capnp.List } + +// NewEnumerant creates a new list of Enumerant. 
+func NewEnumerant_List(s *capnp.Segment, sz int32) (Enumerant_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}, sz) + return Enumerant_List{l}, err +} + +func (s Enumerant_List) At(i int) Enumerant { return Enumerant{s.List.Struct(i)} } + +func (s Enumerant_List) Set(i int, v Enumerant) error { return s.List.SetStruct(i, v.Struct) } + +// Enumerant_Promise is a wrapper for a Enumerant promised by a client call. +type Enumerant_Promise struct{ *capnp.Pipeline } + +func (p Enumerant_Promise) Struct() (Enumerant, error) { + s, err := p.Pipeline.Struct() + return Enumerant{s}, err +} + +type Superclass struct{ capnp.Struct } + +// Superclass_TypeID is the unique identifier for the type Superclass. +const Superclass_TypeID = 0xa9962a9ed0a4d7f8 + +func NewSuperclass(s *capnp.Segment) (Superclass, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}) + return Superclass{st}, err +} + +func NewRootSuperclass(s *capnp.Segment) (Superclass, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}) + return Superclass{st}, err +} + +func ReadRootSuperclass(msg *capnp.Message) (Superclass, error) { + root, err := msg.RootPtr() + return Superclass{root.Struct()}, err +} + +func (s Superclass) Id() uint64 { + return s.Struct.Uint64(0) +} + +func (s Superclass) SetId(v uint64) { + s.Struct.SetUint64(0, v) +} + +func (s Superclass) Brand() (Brand, error) { + p, err := s.Struct.Ptr(0) + return Brand{Struct: p.Struct()}, err +} + +func (s Superclass) HasBrand() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Superclass) SetBrand(v Brand) error { + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewBrand sets the brand field to a newly +// allocated Brand struct, preferring placement in s's segment. +func (s Superclass) NewBrand() (Brand, error) { + ss, err := NewBrand(s.Struct.Segment()) + if err != nil { + return Brand{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +// Superclass_List is a list of Superclass. +type Superclass_List struct{ capnp.List } + +// NewSuperclass creates a new list of Superclass. +func NewSuperclass_List(s *capnp.Segment, sz int32) (Superclass_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}, sz) + return Superclass_List{l}, err +} + +func (s Superclass_List) At(i int) Superclass { return Superclass{s.List.Struct(i)} } + +func (s Superclass_List) Set(i int, v Superclass) error { return s.List.SetStruct(i, v.Struct) } + +// Superclass_Promise is a wrapper for a Superclass promised by a client call. +type Superclass_Promise struct{ *capnp.Pipeline } + +func (p Superclass_Promise) Struct() (Superclass, error) { + s, err := p.Pipeline.Struct() + return Superclass{s}, err +} + +func (p Superclass_Promise) Brand() Brand_Promise { + return Brand_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +type Method struct{ capnp.Struct } + +// Method_TypeID is the unique identifier for the type Method. 
+const Method_TypeID = 0x9500cce23b334d80 + +func NewMethod(s *capnp.Segment) (Method, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 24, PointerCount: 5}) + return Method{st}, err +} + +func NewRootMethod(s *capnp.Segment) (Method, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 24, PointerCount: 5}) + return Method{st}, err +} + +func ReadRootMethod(msg *capnp.Message) (Method, error) { + root, err := msg.RootPtr() + return Method{root.Struct()}, err +} + +func (s Method) Name() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s Method) HasName() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Method) NameBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s Method) SetName(v string) error { + return s.Struct.SetText(0, v) +} + +func (s Method) CodeOrder() uint16 { + return s.Struct.Uint16(0) +} + +func (s Method) SetCodeOrder(v uint16) { + s.Struct.SetUint16(0, v) +} + +func (s Method) ImplicitParameters() (Node_Parameter_List, error) { + p, err := s.Struct.Ptr(4) + return Node_Parameter_List{List: p.List()}, err +} + +func (s Method) HasImplicitParameters() bool { + p, err := s.Struct.Ptr(4) + return p.IsValid() || err != nil +} + +func (s Method) SetImplicitParameters(v Node_Parameter_List) error { + return s.Struct.SetPtr(4, v.List.ToPtr()) +} + +// NewImplicitParameters sets the implicitParameters field to a newly +// allocated Node_Parameter_List, preferring placement in s's segment. +func (s Method) NewImplicitParameters(n int32) (Node_Parameter_List, error) { + l, err := NewNode_Parameter_List(s.Struct.Segment(), n) + if err != nil { + return Node_Parameter_List{}, err + } + err = s.Struct.SetPtr(4, l.List.ToPtr()) + return l, err +} + +func (s Method) ParamStructType() uint64 { + return s.Struct.Uint64(8) +} + +func (s Method) SetParamStructType(v uint64) { + s.Struct.SetUint64(8, v) +} + +func (s Method) ParamBrand() (Brand, error) { + p, err := s.Struct.Ptr(2) + return Brand{Struct: p.Struct()}, err +} + +func (s Method) HasParamBrand() bool { + p, err := s.Struct.Ptr(2) + return p.IsValid() || err != nil +} + +func (s Method) SetParamBrand(v Brand) error { + return s.Struct.SetPtr(2, v.Struct.ToPtr()) +} + +// NewParamBrand sets the paramBrand field to a newly +// allocated Brand struct, preferring placement in s's segment. +func (s Method) NewParamBrand() (Brand, error) { + ss, err := NewBrand(s.Struct.Segment()) + if err != nil { + return Brand{}, err + } + err = s.Struct.SetPtr(2, ss.Struct.ToPtr()) + return ss, err +} + +func (s Method) ResultStructType() uint64 { + return s.Struct.Uint64(16) +} + +func (s Method) SetResultStructType(v uint64) { + s.Struct.SetUint64(16, v) +} + +func (s Method) ResultBrand() (Brand, error) { + p, err := s.Struct.Ptr(3) + return Brand{Struct: p.Struct()}, err +} + +func (s Method) HasResultBrand() bool { + p, err := s.Struct.Ptr(3) + return p.IsValid() || err != nil +} + +func (s Method) SetResultBrand(v Brand) error { + return s.Struct.SetPtr(3, v.Struct.ToPtr()) +} + +// NewResultBrand sets the resultBrand field to a newly +// allocated Brand struct, preferring placement in s's segment. 
+func (s Method) NewResultBrand() (Brand, error) { + ss, err := NewBrand(s.Struct.Segment()) + if err != nil { + return Brand{}, err + } + err = s.Struct.SetPtr(3, ss.Struct.ToPtr()) + return ss, err +} + +func (s Method) Annotations() (Annotation_List, error) { + p, err := s.Struct.Ptr(1) + return Annotation_List{List: p.List()}, err +} + +func (s Method) HasAnnotations() bool { + p, err := s.Struct.Ptr(1) + return p.IsValid() || err != nil +} + +func (s Method) SetAnnotations(v Annotation_List) error { + return s.Struct.SetPtr(1, v.List.ToPtr()) +} + +// NewAnnotations sets the annotations field to a newly +// allocated Annotation_List, preferring placement in s's segment. +func (s Method) NewAnnotations(n int32) (Annotation_List, error) { + l, err := NewAnnotation_List(s.Struct.Segment(), n) + if err != nil { + return Annotation_List{}, err + } + err = s.Struct.SetPtr(1, l.List.ToPtr()) + return l, err +} + +// Method_List is a list of Method. +type Method_List struct{ capnp.List } + +// NewMethod creates a new list of Method. +func NewMethod_List(s *capnp.Segment, sz int32) (Method_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 24, PointerCount: 5}, sz) + return Method_List{l}, err +} + +func (s Method_List) At(i int) Method { return Method{s.List.Struct(i)} } + +func (s Method_List) Set(i int, v Method) error { return s.List.SetStruct(i, v.Struct) } + +// Method_Promise is a wrapper for a Method promised by a client call. +type Method_Promise struct{ *capnp.Pipeline } + +func (p Method_Promise) Struct() (Method, error) { + s, err := p.Pipeline.Struct() + return Method{s}, err +} + +func (p Method_Promise) ParamBrand() Brand_Promise { + return Brand_Promise{Pipeline: p.Pipeline.GetPipeline(2)} +} + +func (p Method_Promise) ResultBrand() Brand_Promise { + return Brand_Promise{Pipeline: p.Pipeline.GetPipeline(3)} +} + +type Type struct{ capnp.Struct } +type Type_list Type +type Type_enum Type +type Type_structType Type +type Type_interface Type +type Type_anyPointer Type +type Type_anyPointer_unconstrained Type +type Type_anyPointer_parameter Type +type Type_anyPointer_implicitMethodParameter Type +type Type_Which uint16 + +const ( + Type_Which_void Type_Which = 0 + Type_Which_bool Type_Which = 1 + Type_Which_int8 Type_Which = 2 + Type_Which_int16 Type_Which = 3 + Type_Which_int32 Type_Which = 4 + Type_Which_int64 Type_Which = 5 + Type_Which_uint8 Type_Which = 6 + Type_Which_uint16 Type_Which = 7 + Type_Which_uint32 Type_Which = 8 + Type_Which_uint64 Type_Which = 9 + Type_Which_float32 Type_Which = 10 + Type_Which_float64 Type_Which = 11 + Type_Which_text Type_Which = 12 + Type_Which_data Type_Which = 13 + Type_Which_list Type_Which = 14 + Type_Which_enum Type_Which = 15 + Type_Which_structType Type_Which = 16 + Type_Which_interface Type_Which = 17 + Type_Which_anyPointer Type_Which = 18 +) + +func (w Type_Which) String() string { + const s = "voidboolint8int16int32int64uint8uint16uint32uint64float32float64textdatalistenumstructTypeinterfaceanyPointer" + switch w { + case Type_Which_void: + return s[0:4] + case Type_Which_bool: + return s[4:8] + case Type_Which_int8: + return s[8:12] + case Type_Which_int16: + return s[12:17] + case Type_Which_int32: + return s[17:22] + case Type_Which_int64: + return s[22:27] + case Type_Which_uint8: + return s[27:32] + case Type_Which_uint16: + return s[32:38] + case Type_Which_uint32: + return s[38:44] + case Type_Which_uint64: + return s[44:50] + case Type_Which_float32: + return s[50:57] + case Type_Which_float64: + 
return s[57:64] + case Type_Which_text: + return s[64:68] + case Type_Which_data: + return s[68:72] + case Type_Which_list: + return s[72:76] + case Type_Which_enum: + return s[76:80] + case Type_Which_structType: + return s[80:90] + case Type_Which_interface: + return s[90:99] + case Type_Which_anyPointer: + return s[99:109] + + } + return "Type_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +type Type_anyPointer_Which uint16 + +const ( + Type_anyPointer_Which_unconstrained Type_anyPointer_Which = 0 + Type_anyPointer_Which_parameter Type_anyPointer_Which = 1 + Type_anyPointer_Which_implicitMethodParameter Type_anyPointer_Which = 2 +) + +func (w Type_anyPointer_Which) String() string { + const s = "unconstrainedparameterimplicitMethodParameter" + switch w { + case Type_anyPointer_Which_unconstrained: + return s[0:13] + case Type_anyPointer_Which_parameter: + return s[13:22] + case Type_anyPointer_Which_implicitMethodParameter: + return s[22:45] + + } + return "Type_anyPointer_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +type Type_anyPointer_unconstrained_Which uint16 + +const ( + Type_anyPointer_unconstrained_Which_anyKind Type_anyPointer_unconstrained_Which = 0 + Type_anyPointer_unconstrained_Which_struct Type_anyPointer_unconstrained_Which = 1 + Type_anyPointer_unconstrained_Which_list Type_anyPointer_unconstrained_Which = 2 + Type_anyPointer_unconstrained_Which_capability Type_anyPointer_unconstrained_Which = 3 +) + +func (w Type_anyPointer_unconstrained_Which) String() string { + const s = "anyKindstructlistcapability" + switch w { + case Type_anyPointer_unconstrained_Which_anyKind: + return s[0:7] + case Type_anyPointer_unconstrained_Which_struct: + return s[7:13] + case Type_anyPointer_unconstrained_Which_list: + return s[13:17] + case Type_anyPointer_unconstrained_Which_capability: + return s[17:27] + + } + return "Type_anyPointer_unconstrained_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +// Type_TypeID is the unique identifier for the type Type. 
+const Type_TypeID = 0xd07378ede1f9cc60 + +func NewType(s *capnp.Segment) (Type, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 24, PointerCount: 1}) + return Type{st}, err +} + +func NewRootType(s *capnp.Segment) (Type, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 24, PointerCount: 1}) + return Type{st}, err +} + +func ReadRootType(msg *capnp.Message) (Type, error) { + root, err := msg.RootPtr() + return Type{root.Struct()}, err +} + +func (s Type) Which() Type_Which { + return Type_Which(s.Struct.Uint16(0)) +} +func (s Type) SetVoid() { + s.Struct.SetUint16(0, 0) + +} + +func (s Type) SetBool() { + s.Struct.SetUint16(0, 1) + +} + +func (s Type) SetInt8() { + s.Struct.SetUint16(0, 2) + +} + +func (s Type) SetInt16() { + s.Struct.SetUint16(0, 3) + +} + +func (s Type) SetInt32() { + s.Struct.SetUint16(0, 4) + +} + +func (s Type) SetInt64() { + s.Struct.SetUint16(0, 5) + +} + +func (s Type) SetUint8() { + s.Struct.SetUint16(0, 6) + +} + +func (s Type) SetUint16() { + s.Struct.SetUint16(0, 7) + +} + +func (s Type) SetUint32() { + s.Struct.SetUint16(0, 8) + +} + +func (s Type) SetUint64() { + s.Struct.SetUint16(0, 9) + +} + +func (s Type) SetFloat32() { + s.Struct.SetUint16(0, 10) + +} + +func (s Type) SetFloat64() { + s.Struct.SetUint16(0, 11) + +} + +func (s Type) SetText() { + s.Struct.SetUint16(0, 12) + +} + +func (s Type) SetData() { + s.Struct.SetUint16(0, 13) + +} + +func (s Type) List() Type_list { return Type_list(s) } + +func (s Type) SetList() { + s.Struct.SetUint16(0, 14) +} + +func (s Type_list) ElementType() (Type, error) { + p, err := s.Struct.Ptr(0) + return Type{Struct: p.Struct()}, err +} + +func (s Type_list) HasElementType() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Type_list) SetElementType(v Type) error { + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewElementType sets the elementType field to a newly +// allocated Type struct, preferring placement in s's segment. +func (s Type_list) NewElementType() (Type, error) { + ss, err := NewType(s.Struct.Segment()) + if err != nil { + return Type{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +func (s Type) Enum() Type_enum { return Type_enum(s) } + +func (s Type) SetEnum() { + s.Struct.SetUint16(0, 15) +} + +func (s Type_enum) TypeId() uint64 { + return s.Struct.Uint64(8) +} + +func (s Type_enum) SetTypeId(v uint64) { + s.Struct.SetUint64(8, v) +} + +func (s Type_enum) Brand() (Brand, error) { + p, err := s.Struct.Ptr(0) + return Brand{Struct: p.Struct()}, err +} + +func (s Type_enum) HasBrand() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Type_enum) SetBrand(v Brand) error { + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewBrand sets the brand field to a newly +// allocated Brand struct, preferring placement in s's segment. 
+func (s Type_enum) NewBrand() (Brand, error) { + ss, err := NewBrand(s.Struct.Segment()) + if err != nil { + return Brand{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +func (s Type) StructType() Type_structType { return Type_structType(s) } + +func (s Type) SetStructType() { + s.Struct.SetUint16(0, 16) +} + +func (s Type_structType) TypeId() uint64 { + return s.Struct.Uint64(8) +} + +func (s Type_structType) SetTypeId(v uint64) { + s.Struct.SetUint64(8, v) +} + +func (s Type_structType) Brand() (Brand, error) { + p, err := s.Struct.Ptr(0) + return Brand{Struct: p.Struct()}, err +} + +func (s Type_structType) HasBrand() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Type_structType) SetBrand(v Brand) error { + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewBrand sets the brand field to a newly +// allocated Brand struct, preferring placement in s's segment. +func (s Type_structType) NewBrand() (Brand, error) { + ss, err := NewBrand(s.Struct.Segment()) + if err != nil { + return Brand{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +func (s Type) Interface() Type_interface { return Type_interface(s) } + +func (s Type) SetInterface() { + s.Struct.SetUint16(0, 17) +} + +func (s Type_interface) TypeId() uint64 { + return s.Struct.Uint64(8) +} + +func (s Type_interface) SetTypeId(v uint64) { + s.Struct.SetUint64(8, v) +} + +func (s Type_interface) Brand() (Brand, error) { + p, err := s.Struct.Ptr(0) + return Brand{Struct: p.Struct()}, err +} + +func (s Type_interface) HasBrand() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Type_interface) SetBrand(v Brand) error { + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewBrand sets the brand field to a newly +// allocated Brand struct, preferring placement in s's segment. 
+func (s Type_interface) NewBrand() (Brand, error) { + ss, err := NewBrand(s.Struct.Segment()) + if err != nil { + return Brand{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +func (s Type) AnyPointer() Type_anyPointer { return Type_anyPointer(s) } + +func (s Type) SetAnyPointer() { + s.Struct.SetUint16(0, 18) +} + +func (s Type_anyPointer) Which() Type_anyPointer_Which { + return Type_anyPointer_Which(s.Struct.Uint16(8)) +} +func (s Type_anyPointer) Unconstrained() Type_anyPointer_unconstrained { + return Type_anyPointer_unconstrained(s) +} + +func (s Type_anyPointer) SetUnconstrained() { + s.Struct.SetUint16(8, 0) +} + +func (s Type_anyPointer_unconstrained) Which() Type_anyPointer_unconstrained_Which { + return Type_anyPointer_unconstrained_Which(s.Struct.Uint16(10)) +} +func (s Type_anyPointer_unconstrained) SetAnyKind() { + s.Struct.SetUint16(10, 0) + +} + +func (s Type_anyPointer_unconstrained) SetStruct() { + s.Struct.SetUint16(10, 1) + +} + +func (s Type_anyPointer_unconstrained) SetList() { + s.Struct.SetUint16(10, 2) + +} + +func (s Type_anyPointer_unconstrained) SetCapability() { + s.Struct.SetUint16(10, 3) + +} + +func (s Type_anyPointer) Parameter() Type_anyPointer_parameter { return Type_anyPointer_parameter(s) } + +func (s Type_anyPointer) SetParameter() { + s.Struct.SetUint16(8, 1) +} + +func (s Type_anyPointer_parameter) ScopeId() uint64 { + return s.Struct.Uint64(16) +} + +func (s Type_anyPointer_parameter) SetScopeId(v uint64) { + s.Struct.SetUint64(16, v) +} + +func (s Type_anyPointer_parameter) ParameterIndex() uint16 { + return s.Struct.Uint16(10) +} + +func (s Type_anyPointer_parameter) SetParameterIndex(v uint16) { + s.Struct.SetUint16(10, v) +} + +func (s Type_anyPointer) ImplicitMethodParameter() Type_anyPointer_implicitMethodParameter { + return Type_anyPointer_implicitMethodParameter(s) +} + +func (s Type_anyPointer) SetImplicitMethodParameter() { + s.Struct.SetUint16(8, 2) +} + +func (s Type_anyPointer_implicitMethodParameter) ParameterIndex() uint16 { + return s.Struct.Uint16(10) +} + +func (s Type_anyPointer_implicitMethodParameter) SetParameterIndex(v uint16) { + s.Struct.SetUint16(10, v) +} + +// Type_List is a list of Type. +type Type_List struct{ capnp.List } + +// NewType creates a new list of Type. +func NewType_List(s *capnp.Segment, sz int32) (Type_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 24, PointerCount: 1}, sz) + return Type_List{l}, err +} + +func (s Type_List) At(i int) Type { return Type{s.List.Struct(i)} } + +func (s Type_List) Set(i int, v Type) error { return s.List.SetStruct(i, v.Struct) } + +// Type_Promise is a wrapper for a Type promised by a client call. +type Type_Promise struct{ *capnp.Pipeline } + +func (p Type_Promise) Struct() (Type, error) { + s, err := p.Pipeline.Struct() + return Type{s}, err +} + +func (p Type_Promise) List() Type_list_Promise { return Type_list_Promise{p.Pipeline} } + +// Type_list_Promise is a wrapper for a Type_list promised by a client call. +type Type_list_Promise struct{ *capnp.Pipeline } + +func (p Type_list_Promise) Struct() (Type_list, error) { + s, err := p.Pipeline.Struct() + return Type_list{s}, err +} + +func (p Type_list_Promise) ElementType() Type_Promise { + return Type_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +func (p Type_Promise) Enum() Type_enum_Promise { return Type_enum_Promise{p.Pipeline} } + +// Type_enum_Promise is a wrapper for a Type_enum promised by a client call. 
+type Type_enum_Promise struct{ *capnp.Pipeline } + +func (p Type_enum_Promise) Struct() (Type_enum, error) { + s, err := p.Pipeline.Struct() + return Type_enum{s}, err +} + +func (p Type_enum_Promise) Brand() Brand_Promise { + return Brand_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +func (p Type_Promise) StructType() Type_structType_Promise { return Type_structType_Promise{p.Pipeline} } + +// Type_structType_Promise is a wrapper for a Type_structType promised by a client call. +type Type_structType_Promise struct{ *capnp.Pipeline } + +func (p Type_structType_Promise) Struct() (Type_structType, error) { + s, err := p.Pipeline.Struct() + return Type_structType{s}, err +} + +func (p Type_structType_Promise) Brand() Brand_Promise { + return Brand_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +func (p Type_Promise) Interface() Type_interface_Promise { return Type_interface_Promise{p.Pipeline} } + +// Type_interface_Promise is a wrapper for a Type_interface promised by a client call. +type Type_interface_Promise struct{ *capnp.Pipeline } + +func (p Type_interface_Promise) Struct() (Type_interface, error) { + s, err := p.Pipeline.Struct() + return Type_interface{s}, err +} + +func (p Type_interface_Promise) Brand() Brand_Promise { + return Brand_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +func (p Type_Promise) AnyPointer() Type_anyPointer_Promise { return Type_anyPointer_Promise{p.Pipeline} } + +// Type_anyPointer_Promise is a wrapper for a Type_anyPointer promised by a client call. +type Type_anyPointer_Promise struct{ *capnp.Pipeline } + +func (p Type_anyPointer_Promise) Struct() (Type_anyPointer, error) { + s, err := p.Pipeline.Struct() + return Type_anyPointer{s}, err +} + +func (p Type_anyPointer_Promise) Unconstrained() Type_anyPointer_unconstrained_Promise { + return Type_anyPointer_unconstrained_Promise{p.Pipeline} +} + +// Type_anyPointer_unconstrained_Promise is a wrapper for a Type_anyPointer_unconstrained promised by a client call. +type Type_anyPointer_unconstrained_Promise struct{ *capnp.Pipeline } + +func (p Type_anyPointer_unconstrained_Promise) Struct() (Type_anyPointer_unconstrained, error) { + s, err := p.Pipeline.Struct() + return Type_anyPointer_unconstrained{s}, err +} + +func (p Type_anyPointer_Promise) Parameter() Type_anyPointer_parameter_Promise { + return Type_anyPointer_parameter_Promise{p.Pipeline} +} + +// Type_anyPointer_parameter_Promise is a wrapper for a Type_anyPointer_parameter promised by a client call. +type Type_anyPointer_parameter_Promise struct{ *capnp.Pipeline } + +func (p Type_anyPointer_parameter_Promise) Struct() (Type_anyPointer_parameter, error) { + s, err := p.Pipeline.Struct() + return Type_anyPointer_parameter{s}, err +} + +func (p Type_anyPointer_Promise) ImplicitMethodParameter() Type_anyPointer_implicitMethodParameter_Promise { + return Type_anyPointer_implicitMethodParameter_Promise{p.Pipeline} +} + +// Type_anyPointer_implicitMethodParameter_Promise is a wrapper for a Type_anyPointer_implicitMethodParameter promised by a client call. +type Type_anyPointer_implicitMethodParameter_Promise struct{ *capnp.Pipeline } + +func (p Type_anyPointer_implicitMethodParameter_Promise) Struct() (Type_anyPointer_implicitMethodParameter, error) { + s, err := p.Pipeline.Struct() + return Type_anyPointer_implicitMethodParameter{s}, err +} + +type Brand struct{ capnp.Struct } + +// Brand_TypeID is the unique identifier for the type Brand. 
+const Brand_TypeID = 0x903455f06065422b + +func NewBrand(s *capnp.Segment) (Brand, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Brand{st}, err +} + +func NewRootBrand(s *capnp.Segment) (Brand, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Brand{st}, err +} + +func ReadRootBrand(msg *capnp.Message) (Brand, error) { + root, err := msg.RootPtr() + return Brand{root.Struct()}, err +} + +func (s Brand) Scopes() (Brand_Scope_List, error) { + p, err := s.Struct.Ptr(0) + return Brand_Scope_List{List: p.List()}, err +} + +func (s Brand) HasScopes() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Brand) SetScopes(v Brand_Scope_List) error { + return s.Struct.SetPtr(0, v.List.ToPtr()) +} + +// NewScopes sets the scopes field to a newly +// allocated Brand_Scope_List, preferring placement in s's segment. +func (s Brand) NewScopes(n int32) (Brand_Scope_List, error) { + l, err := NewBrand_Scope_List(s.Struct.Segment(), n) + if err != nil { + return Brand_Scope_List{}, err + } + err = s.Struct.SetPtr(0, l.List.ToPtr()) + return l, err +} + +// Brand_List is a list of Brand. +type Brand_List struct{ capnp.List } + +// NewBrand creates a new list of Brand. +func NewBrand_List(s *capnp.Segment, sz int32) (Brand_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return Brand_List{l}, err +} + +func (s Brand_List) At(i int) Brand { return Brand{s.List.Struct(i)} } + +func (s Brand_List) Set(i int, v Brand) error { return s.List.SetStruct(i, v.Struct) } + +// Brand_Promise is a wrapper for a Brand promised by a client call. +type Brand_Promise struct{ *capnp.Pipeline } + +func (p Brand_Promise) Struct() (Brand, error) { + s, err := p.Pipeline.Struct() + return Brand{s}, err +} + +type Brand_Scope struct{ capnp.Struct } +type Brand_Scope_Which uint16 + +const ( + Brand_Scope_Which_bind Brand_Scope_Which = 0 + Brand_Scope_Which_inherit Brand_Scope_Which = 1 +) + +func (w Brand_Scope_Which) String() string { + const s = "bindinherit" + switch w { + case Brand_Scope_Which_bind: + return s[0:4] + case Brand_Scope_Which_inherit: + return s[4:11] + + } + return "Brand_Scope_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +// Brand_Scope_TypeID is the unique identifier for the type Brand_Scope. 
+const Brand_Scope_TypeID = 0xabd73485a9636bc9 + +func NewBrand_Scope(s *capnp.Segment) (Brand_Scope, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 1}) + return Brand_Scope{st}, err +} + +func NewRootBrand_Scope(s *capnp.Segment) (Brand_Scope, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 1}) + return Brand_Scope{st}, err +} + +func ReadRootBrand_Scope(msg *capnp.Message) (Brand_Scope, error) { + root, err := msg.RootPtr() + return Brand_Scope{root.Struct()}, err +} + +func (s Brand_Scope) Which() Brand_Scope_Which { + return Brand_Scope_Which(s.Struct.Uint16(8)) +} +func (s Brand_Scope) ScopeId() uint64 { + return s.Struct.Uint64(0) +} + +func (s Brand_Scope) SetScopeId(v uint64) { + s.Struct.SetUint64(0, v) +} + +func (s Brand_Scope) Bind() (Brand_Binding_List, error) { + p, err := s.Struct.Ptr(0) + return Brand_Binding_List{List: p.List()}, err +} + +func (s Brand_Scope) HasBind() bool { + if s.Struct.Uint16(8) != 0 { + return false + } + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Brand_Scope) SetBind(v Brand_Binding_List) error { + s.Struct.SetUint16(8, 0) + return s.Struct.SetPtr(0, v.List.ToPtr()) +} + +// NewBind sets the bind field to a newly +// allocated Brand_Binding_List, preferring placement in s's segment. +func (s Brand_Scope) NewBind(n int32) (Brand_Binding_List, error) { + s.Struct.SetUint16(8, 0) + l, err := NewBrand_Binding_List(s.Struct.Segment(), n) + if err != nil { + return Brand_Binding_List{}, err + } + err = s.Struct.SetPtr(0, l.List.ToPtr()) + return l, err +} + +func (s Brand_Scope) SetInherit() { + s.Struct.SetUint16(8, 1) + +} + +// Brand_Scope_List is a list of Brand_Scope. +type Brand_Scope_List struct{ capnp.List } + +// NewBrand_Scope creates a new list of Brand_Scope. +func NewBrand_Scope_List(s *capnp.Segment, sz int32) (Brand_Scope_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 16, PointerCount: 1}, sz) + return Brand_Scope_List{l}, err +} + +func (s Brand_Scope_List) At(i int) Brand_Scope { return Brand_Scope{s.List.Struct(i)} } + +func (s Brand_Scope_List) Set(i int, v Brand_Scope) error { return s.List.SetStruct(i, v.Struct) } + +// Brand_Scope_Promise is a wrapper for a Brand_Scope promised by a client call. +type Brand_Scope_Promise struct{ *capnp.Pipeline } + +func (p Brand_Scope_Promise) Struct() (Brand_Scope, error) { + s, err := p.Pipeline.Struct() + return Brand_Scope{s}, err +} + +type Brand_Binding struct{ capnp.Struct } +type Brand_Binding_Which uint16 + +const ( + Brand_Binding_Which_unbound Brand_Binding_Which = 0 + Brand_Binding_Which_type Brand_Binding_Which = 1 +) + +func (w Brand_Binding_Which) String() string { + const s = "unboundtype" + switch w { + case Brand_Binding_Which_unbound: + return s[0:7] + case Brand_Binding_Which_type: + return s[7:11] + + } + return "Brand_Binding_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +// Brand_Binding_TypeID is the unique identifier for the type Brand_Binding. 
+const Brand_Binding_TypeID = 0xc863cd16969ee7fc + +func NewBrand_Binding(s *capnp.Segment) (Brand_Binding, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}) + return Brand_Binding{st}, err +} + +func NewRootBrand_Binding(s *capnp.Segment) (Brand_Binding, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}) + return Brand_Binding{st}, err +} + +func ReadRootBrand_Binding(msg *capnp.Message) (Brand_Binding, error) { + root, err := msg.RootPtr() + return Brand_Binding{root.Struct()}, err +} + +func (s Brand_Binding) Which() Brand_Binding_Which { + return Brand_Binding_Which(s.Struct.Uint16(0)) +} +func (s Brand_Binding) SetUnbound() { + s.Struct.SetUint16(0, 0) + +} + +func (s Brand_Binding) Type() (Type, error) { + p, err := s.Struct.Ptr(0) + return Type{Struct: p.Struct()}, err +} + +func (s Brand_Binding) HasType() bool { + if s.Struct.Uint16(0) != 1 { + return false + } + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Brand_Binding) SetType(v Type) error { + s.Struct.SetUint16(0, 1) + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewType sets the type field to a newly +// allocated Type struct, preferring placement in s's segment. +func (s Brand_Binding) NewType() (Type, error) { + s.Struct.SetUint16(0, 1) + ss, err := NewType(s.Struct.Segment()) + if err != nil { + return Type{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +// Brand_Binding_List is a list of Brand_Binding. +type Brand_Binding_List struct{ capnp.List } + +// NewBrand_Binding creates a new list of Brand_Binding. +func NewBrand_Binding_List(s *capnp.Segment, sz int32) (Brand_Binding_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}, sz) + return Brand_Binding_List{l}, err +} + +func (s Brand_Binding_List) At(i int) Brand_Binding { return Brand_Binding{s.List.Struct(i)} } + +func (s Brand_Binding_List) Set(i int, v Brand_Binding) error { return s.List.SetStruct(i, v.Struct) } + +// Brand_Binding_Promise is a wrapper for a Brand_Binding promised by a client call. 
+type Brand_Binding_Promise struct{ *capnp.Pipeline } + +func (p Brand_Binding_Promise) Struct() (Brand_Binding, error) { + s, err := p.Pipeline.Struct() + return Brand_Binding{s}, err +} + +func (p Brand_Binding_Promise) Type() Type_Promise { + return Type_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +type Value struct{ capnp.Struct } +type Value_Which uint16 + +const ( + Value_Which_void Value_Which = 0 + Value_Which_bool Value_Which = 1 + Value_Which_int8 Value_Which = 2 + Value_Which_int16 Value_Which = 3 + Value_Which_int32 Value_Which = 4 + Value_Which_int64 Value_Which = 5 + Value_Which_uint8 Value_Which = 6 + Value_Which_uint16 Value_Which = 7 + Value_Which_uint32 Value_Which = 8 + Value_Which_uint64 Value_Which = 9 + Value_Which_float32 Value_Which = 10 + Value_Which_float64 Value_Which = 11 + Value_Which_text Value_Which = 12 + Value_Which_data Value_Which = 13 + Value_Which_list Value_Which = 14 + Value_Which_enum Value_Which = 15 + Value_Which_structValue Value_Which = 16 + Value_Which_interface Value_Which = 17 + Value_Which_anyPointer Value_Which = 18 +) + +func (w Value_Which) String() string { + const s = "voidboolint8int16int32int64uint8uint16uint32uint64float32float64textdatalistenumstructValueinterfaceanyPointer" + switch w { + case Value_Which_void: + return s[0:4] + case Value_Which_bool: + return s[4:8] + case Value_Which_int8: + return s[8:12] + case Value_Which_int16: + return s[12:17] + case Value_Which_int32: + return s[17:22] + case Value_Which_int64: + return s[22:27] + case Value_Which_uint8: + return s[27:32] + case Value_Which_uint16: + return s[32:38] + case Value_Which_uint32: + return s[38:44] + case Value_Which_uint64: + return s[44:50] + case Value_Which_float32: + return s[50:57] + case Value_Which_float64: + return s[57:64] + case Value_Which_text: + return s[64:68] + case Value_Which_data: + return s[68:72] + case Value_Which_list: + return s[72:76] + case Value_Which_enum: + return s[76:80] + case Value_Which_structValue: + return s[80:91] + case Value_Which_interface: + return s[91:100] + case Value_Which_anyPointer: + return s[100:110] + + } + return "Value_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +// Value_TypeID is the unique identifier for the type Value. 
+const Value_TypeID = 0xce23dcd2d7b00c9b + +func NewValue(s *capnp.Segment) (Value, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 1}) + return Value{st}, err +} + +func NewRootValue(s *capnp.Segment) (Value, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 1}) + return Value{st}, err +} + +func ReadRootValue(msg *capnp.Message) (Value, error) { + root, err := msg.RootPtr() + return Value{root.Struct()}, err +} + +func (s Value) Which() Value_Which { + return Value_Which(s.Struct.Uint16(0)) +} +func (s Value) SetVoid() { + s.Struct.SetUint16(0, 0) + +} + +func (s Value) Bool() bool { + return s.Struct.Bit(16) +} + +func (s Value) SetBool(v bool) { + s.Struct.SetUint16(0, 1) + s.Struct.SetBit(16, v) +} + +func (s Value) Int8() int8 { + return int8(s.Struct.Uint8(2)) +} + +func (s Value) SetInt8(v int8) { + s.Struct.SetUint16(0, 2) + s.Struct.SetUint8(2, uint8(v)) +} + +func (s Value) Int16() int16 { + return int16(s.Struct.Uint16(2)) +} + +func (s Value) SetInt16(v int16) { + s.Struct.SetUint16(0, 3) + s.Struct.SetUint16(2, uint16(v)) +} + +func (s Value) Int32() int32 { + return int32(s.Struct.Uint32(4)) +} + +func (s Value) SetInt32(v int32) { + s.Struct.SetUint16(0, 4) + s.Struct.SetUint32(4, uint32(v)) +} + +func (s Value) Int64() int64 { + return int64(s.Struct.Uint64(8)) +} + +func (s Value) SetInt64(v int64) { + s.Struct.SetUint16(0, 5) + s.Struct.SetUint64(8, uint64(v)) +} + +func (s Value) Uint8() uint8 { + return s.Struct.Uint8(2) +} + +func (s Value) SetUint8(v uint8) { + s.Struct.SetUint16(0, 6) + s.Struct.SetUint8(2, v) +} + +func (s Value) Uint16() uint16 { + return s.Struct.Uint16(2) +} + +func (s Value) SetUint16(v uint16) { + s.Struct.SetUint16(0, 7) + s.Struct.SetUint16(2, v) +} + +func (s Value) Uint32() uint32 { + return s.Struct.Uint32(4) +} + +func (s Value) SetUint32(v uint32) { + s.Struct.SetUint16(0, 8) + s.Struct.SetUint32(4, v) +} + +func (s Value) Uint64() uint64 { + return s.Struct.Uint64(8) +} + +func (s Value) SetUint64(v uint64) { + s.Struct.SetUint16(0, 9) + s.Struct.SetUint64(8, v) +} + +func (s Value) Float32() float32 { + return math.Float32frombits(s.Struct.Uint32(4)) +} + +func (s Value) SetFloat32(v float32) { + s.Struct.SetUint16(0, 10) + s.Struct.SetUint32(4, math.Float32bits(v)) +} + +func (s Value) Float64() float64 { + return math.Float64frombits(s.Struct.Uint64(8)) +} + +func (s Value) SetFloat64(v float64) { + s.Struct.SetUint16(0, 11) + s.Struct.SetUint64(8, math.Float64bits(v)) +} + +func (s Value) Text() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s Value) HasText() bool { + if s.Struct.Uint16(0) != 12 { + return false + } + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Value) TextBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s Value) SetText(v string) error { + s.Struct.SetUint16(0, 12) + return s.Struct.SetText(0, v) +} + +func (s Value) Data() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return []byte(p.Data()), err +} + +func (s Value) HasData() bool { + if s.Struct.Uint16(0) != 13 { + return false + } + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Value) SetData(v []byte) error { + s.Struct.SetUint16(0, 13) + return s.Struct.SetData(0, v) +} + +func (s Value) List() (capnp.Pointer, error) { + return s.Struct.Pointer(0) +} + +func (s Value) HasList() bool { + if s.Struct.Uint16(0) != 14 { + return false + } + p, err 
:= s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Value) ListPtr() (capnp.Ptr, error) { + return s.Struct.Ptr(0) +} + +func (s Value) SetList(v capnp.Pointer) error { + s.Struct.SetUint16(0, 14) + return s.Struct.SetPointer(0, v) +} + +func (s Value) SetListPtr(v capnp.Ptr) error { + s.Struct.SetUint16(0, 14) + return s.Struct.SetPtr(0, v) +} + +func (s Value) Enum() uint16 { + return s.Struct.Uint16(2) +} + +func (s Value) SetEnum(v uint16) { + s.Struct.SetUint16(0, 15) + s.Struct.SetUint16(2, v) +} + +func (s Value) StructValue() (capnp.Pointer, error) { + return s.Struct.Pointer(0) +} + +func (s Value) HasStructValue() bool { + if s.Struct.Uint16(0) != 16 { + return false + } + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Value) StructValuePtr() (capnp.Ptr, error) { + return s.Struct.Ptr(0) +} + +func (s Value) SetStructValue(v capnp.Pointer) error { + s.Struct.SetUint16(0, 16) + return s.Struct.SetPointer(0, v) +} + +func (s Value) SetStructValuePtr(v capnp.Ptr) error { + s.Struct.SetUint16(0, 16) + return s.Struct.SetPtr(0, v) +} + +func (s Value) SetInterface() { + s.Struct.SetUint16(0, 17) + +} + +func (s Value) AnyPointer() (capnp.Pointer, error) { + return s.Struct.Pointer(0) +} + +func (s Value) HasAnyPointer() bool { + if s.Struct.Uint16(0) != 18 { + return false + } + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Value) AnyPointerPtr() (capnp.Ptr, error) { + return s.Struct.Ptr(0) +} + +func (s Value) SetAnyPointer(v capnp.Pointer) error { + s.Struct.SetUint16(0, 18) + return s.Struct.SetPointer(0, v) +} + +func (s Value) SetAnyPointerPtr(v capnp.Ptr) error { + s.Struct.SetUint16(0, 18) + return s.Struct.SetPtr(0, v) +} + +// Value_List is a list of Value. +type Value_List struct{ capnp.List } + +// NewValue creates a new list of Value. +func NewValue_List(s *capnp.Segment, sz int32) (Value_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 16, PointerCount: 1}, sz) + return Value_List{l}, err +} + +func (s Value_List) At(i int) Value { return Value{s.List.Struct(i)} } + +func (s Value_List) Set(i int, v Value) error { return s.List.SetStruct(i, v.Struct) } + +// Value_Promise is a wrapper for a Value promised by a client call. +type Value_Promise struct{ *capnp.Pipeline } + +func (p Value_Promise) Struct() (Value, error) { + s, err := p.Pipeline.Struct() + return Value{s}, err +} + +func (p Value_Promise) List() *capnp.Pipeline { + return p.Pipeline.GetPipeline(0) +} + +func (p Value_Promise) StructValue() *capnp.Pipeline { + return p.Pipeline.GetPipeline(0) +} + +func (p Value_Promise) AnyPointer() *capnp.Pipeline { + return p.Pipeline.GetPipeline(0) +} + +type Annotation struct{ capnp.Struct } + +// Annotation_TypeID is the unique identifier for the type Annotation. 
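(Editorial sketch, not part of the vendored file.) The Value accessors above implement a Cap'n Proto union: each setter writes the variant's discriminant into the first data word (for example SetFloat64 stores 11) before writing the payload, and readers check Which() before calling the matching getter. A minimal illustration, written as if it lived in the same generated package; the package clause and function name are hypothetical, and NewMessage/SingleSegment come from mem.go later in this vendor drop.

package schema // hypothetical placement alongside the generated code above

import (
	"fmt"

	capnp "zombiezen.com/go/capnproto2"
)

func exampleValueUnion() error {
	_, seg, err := capnp.NewMessage(capnp.SingleSegment(nil))
	if err != nil {
		return err
	}
	v, err := NewRootValue(seg)
	if err != nil {
		return err
	}
	// SetFloat64 records discriminant 11, then the 8-byte payload.
	v.SetFloat64(3.5)
	// Check the discriminant before reading; 11 is the float64 variant
	// (named Value_Which constants are presumably defined earlier in the
	// generated file).
	if v.Which() == 11 {
		fmt.Println(v.Float64()) // 3.5
	}
	return nil
}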
+const Annotation_TypeID = 0xf1c8950dab257542 + +func NewAnnotation(s *capnp.Segment) (Annotation, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}) + return Annotation{st}, err +} + +func NewRootAnnotation(s *capnp.Segment) (Annotation, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}) + return Annotation{st}, err +} + +func ReadRootAnnotation(msg *capnp.Message) (Annotation, error) { + root, err := msg.RootPtr() + return Annotation{root.Struct()}, err +} + +func (s Annotation) Id() uint64 { + return s.Struct.Uint64(0) +} + +func (s Annotation) SetId(v uint64) { + s.Struct.SetUint64(0, v) +} + +func (s Annotation) Brand() (Brand, error) { + p, err := s.Struct.Ptr(1) + return Brand{Struct: p.Struct()}, err +} + +func (s Annotation) HasBrand() bool { + p, err := s.Struct.Ptr(1) + return p.IsValid() || err != nil +} + +func (s Annotation) SetBrand(v Brand) error { + return s.Struct.SetPtr(1, v.Struct.ToPtr()) +} + +// NewBrand sets the brand field to a newly +// allocated Brand struct, preferring placement in s's segment. +func (s Annotation) NewBrand() (Brand, error) { + ss, err := NewBrand(s.Struct.Segment()) + if err != nil { + return Brand{}, err + } + err = s.Struct.SetPtr(1, ss.Struct.ToPtr()) + return ss, err +} + +func (s Annotation) Value() (Value, error) { + p, err := s.Struct.Ptr(0) + return Value{Struct: p.Struct()}, err +} + +func (s Annotation) HasValue() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Annotation) SetValue(v Value) error { + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewValue sets the value field to a newly +// allocated Value struct, preferring placement in s's segment. +func (s Annotation) NewValue() (Value, error) { + ss, err := NewValue(s.Struct.Segment()) + if err != nil { + return Value{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +// Annotation_List is a list of Annotation. +type Annotation_List struct{ capnp.List } + +// NewAnnotation creates a new list of Annotation. +func NewAnnotation_List(s *capnp.Segment, sz int32) (Annotation_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}, sz) + return Annotation_List{l}, err +} + +func (s Annotation_List) At(i int) Annotation { return Annotation{s.List.Struct(i)} } + +func (s Annotation_List) Set(i int, v Annotation) error { return s.List.SetStruct(i, v.Struct) } + +// Annotation_Promise is a wrapper for a Annotation promised by a client call. +type Annotation_Promise struct{ *capnp.Pipeline } + +func (p Annotation_Promise) Struct() (Annotation, error) { + s, err := p.Pipeline.Struct() + return Annotation{s}, err +} + +func (p Annotation_Promise) Brand() Brand_Promise { + return Brand_Promise{Pipeline: p.Pipeline.GetPipeline(1)} +} + +func (p Annotation_Promise) Value() Value_Promise { + return Value_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +type ElementSize uint16 + +// ElementSize_TypeID is the unique identifier for the type ElementSize. +const ElementSize_TypeID = 0xd1958f7dba521926 + +// Values of ElementSize. +const ( + ElementSize_empty ElementSize = 0 + ElementSize_bit ElementSize = 1 + ElementSize_byte ElementSize = 2 + ElementSize_twoBytes ElementSize = 3 + ElementSize_fourBytes ElementSize = 4 + ElementSize_eightBytes ElementSize = 5 + ElementSize_pointer ElementSize = 6 + ElementSize_inlineComposite ElementSize = 7 +) + +// String returns the enum's constant name. 
+func (c ElementSize) String() string { + switch c { + case ElementSize_empty: + return "empty" + case ElementSize_bit: + return "bit" + case ElementSize_byte: + return "byte" + case ElementSize_twoBytes: + return "twoBytes" + case ElementSize_fourBytes: + return "fourBytes" + case ElementSize_eightBytes: + return "eightBytes" + case ElementSize_pointer: + return "pointer" + case ElementSize_inlineComposite: + return "inlineComposite" + + default: + return "" + } +} + +// ElementSizeFromString returns the enum value with a name, +// or the zero value if there's no such value. +func ElementSizeFromString(c string) ElementSize { + switch c { + case "empty": + return ElementSize_empty + case "bit": + return ElementSize_bit + case "byte": + return ElementSize_byte + case "twoBytes": + return ElementSize_twoBytes + case "fourBytes": + return ElementSize_fourBytes + case "eightBytes": + return ElementSize_eightBytes + case "pointer": + return ElementSize_pointer + case "inlineComposite": + return ElementSize_inlineComposite + + default: + return 0 + } +} + +type ElementSize_List struct{ capnp.List } + +func NewElementSize_List(s *capnp.Segment, sz int32) (ElementSize_List, error) { + l, err := capnp.NewUInt16List(s, sz) + return ElementSize_List{l.List}, err +} + +func (l ElementSize_List) At(i int) ElementSize { + ul := capnp.UInt16List{List: l.List} + return ElementSize(ul.At(i)) +} + +func (l ElementSize_List) Set(i int, v ElementSize) { + ul := capnp.UInt16List{List: l.List} + ul.Set(i, uint16(v)) +} + +type CodeGeneratorRequest struct{ capnp.Struct } + +// CodeGeneratorRequest_TypeID is the unique identifier for the type CodeGeneratorRequest. +const CodeGeneratorRequest_TypeID = 0xbfc546f6210ad7ce + +func NewCodeGeneratorRequest(s *capnp.Segment) (CodeGeneratorRequest, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}) + return CodeGeneratorRequest{st}, err +} + +func NewRootCodeGeneratorRequest(s *capnp.Segment) (CodeGeneratorRequest, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}) + return CodeGeneratorRequest{st}, err +} + +func ReadRootCodeGeneratorRequest(msg *capnp.Message) (CodeGeneratorRequest, error) { + root, err := msg.RootPtr() + return CodeGeneratorRequest{root.Struct()}, err +} + +func (s CodeGeneratorRequest) Nodes() (Node_List, error) { + p, err := s.Struct.Ptr(0) + return Node_List{List: p.List()}, err +} + +func (s CodeGeneratorRequest) HasNodes() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s CodeGeneratorRequest) SetNodes(v Node_List) error { + return s.Struct.SetPtr(0, v.List.ToPtr()) +} + +// NewNodes sets the nodes field to a newly +// allocated Node_List, preferring placement in s's segment. 
+func (s CodeGeneratorRequest) NewNodes(n int32) (Node_List, error) { + l, err := NewNode_List(s.Struct.Segment(), n) + if err != nil { + return Node_List{}, err + } + err = s.Struct.SetPtr(0, l.List.ToPtr()) + return l, err +} + +func (s CodeGeneratorRequest) RequestedFiles() (CodeGeneratorRequest_RequestedFile_List, error) { + p, err := s.Struct.Ptr(1) + return CodeGeneratorRequest_RequestedFile_List{List: p.List()}, err +} + +func (s CodeGeneratorRequest) HasRequestedFiles() bool { + p, err := s.Struct.Ptr(1) + return p.IsValid() || err != nil +} + +func (s CodeGeneratorRequest) SetRequestedFiles(v CodeGeneratorRequest_RequestedFile_List) error { + return s.Struct.SetPtr(1, v.List.ToPtr()) +} + +// NewRequestedFiles sets the requestedFiles field to a newly +// allocated CodeGeneratorRequest_RequestedFile_List, preferring placement in s's segment. +func (s CodeGeneratorRequest) NewRequestedFiles(n int32) (CodeGeneratorRequest_RequestedFile_List, error) { + l, err := NewCodeGeneratorRequest_RequestedFile_List(s.Struct.Segment(), n) + if err != nil { + return CodeGeneratorRequest_RequestedFile_List{}, err + } + err = s.Struct.SetPtr(1, l.List.ToPtr()) + return l, err +} + +// CodeGeneratorRequest_List is a list of CodeGeneratorRequest. +type CodeGeneratorRequest_List struct{ capnp.List } + +// NewCodeGeneratorRequest creates a new list of CodeGeneratorRequest. +func NewCodeGeneratorRequest_List(s *capnp.Segment, sz int32) (CodeGeneratorRequest_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}, sz) + return CodeGeneratorRequest_List{l}, err +} + +func (s CodeGeneratorRequest_List) At(i int) CodeGeneratorRequest { + return CodeGeneratorRequest{s.List.Struct(i)} +} + +func (s CodeGeneratorRequest_List) Set(i int, v CodeGeneratorRequest) error { + return s.List.SetStruct(i, v.Struct) +} + +// CodeGeneratorRequest_Promise is a wrapper for a CodeGeneratorRequest promised by a client call. +type CodeGeneratorRequest_Promise struct{ *capnp.Pipeline } + +func (p CodeGeneratorRequest_Promise) Struct() (CodeGeneratorRequest, error) { + s, err := p.Pipeline.Struct() + return CodeGeneratorRequest{s}, err +} + +type CodeGeneratorRequest_RequestedFile struct{ capnp.Struct } + +// CodeGeneratorRequest_RequestedFile_TypeID is the unique identifier for the type CodeGeneratorRequest_RequestedFile. 
+const CodeGeneratorRequest_RequestedFile_TypeID = 0xcfea0eb02e810062 + +func NewCodeGeneratorRequest_RequestedFile(s *capnp.Segment) (CodeGeneratorRequest_RequestedFile, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}) + return CodeGeneratorRequest_RequestedFile{st}, err +} + +func NewRootCodeGeneratorRequest_RequestedFile(s *capnp.Segment) (CodeGeneratorRequest_RequestedFile, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}) + return CodeGeneratorRequest_RequestedFile{st}, err +} + +func ReadRootCodeGeneratorRequest_RequestedFile(msg *capnp.Message) (CodeGeneratorRequest_RequestedFile, error) { + root, err := msg.RootPtr() + return CodeGeneratorRequest_RequestedFile{root.Struct()}, err +} + +func (s CodeGeneratorRequest_RequestedFile) Id() uint64 { + return s.Struct.Uint64(0) +} + +func (s CodeGeneratorRequest_RequestedFile) SetId(v uint64) { + s.Struct.SetUint64(0, v) +} + +func (s CodeGeneratorRequest_RequestedFile) Filename() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s CodeGeneratorRequest_RequestedFile) HasFilename() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s CodeGeneratorRequest_RequestedFile) FilenameBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s CodeGeneratorRequest_RequestedFile) SetFilename(v string) error { + return s.Struct.SetText(0, v) +} + +func (s CodeGeneratorRequest_RequestedFile) Imports() (CodeGeneratorRequest_RequestedFile_Import_List, error) { + p, err := s.Struct.Ptr(1) + return CodeGeneratorRequest_RequestedFile_Import_List{List: p.List()}, err +} + +func (s CodeGeneratorRequest_RequestedFile) HasImports() bool { + p, err := s.Struct.Ptr(1) + return p.IsValid() || err != nil +} + +func (s CodeGeneratorRequest_RequestedFile) SetImports(v CodeGeneratorRequest_RequestedFile_Import_List) error { + return s.Struct.SetPtr(1, v.List.ToPtr()) +} + +// NewImports sets the imports field to a newly +// allocated CodeGeneratorRequest_RequestedFile_Import_List, preferring placement in s's segment. +func (s CodeGeneratorRequest_RequestedFile) NewImports(n int32) (CodeGeneratorRequest_RequestedFile_Import_List, error) { + l, err := NewCodeGeneratorRequest_RequestedFile_Import_List(s.Struct.Segment(), n) + if err != nil { + return CodeGeneratorRequest_RequestedFile_Import_List{}, err + } + err = s.Struct.SetPtr(1, l.List.ToPtr()) + return l, err +} + +// CodeGeneratorRequest_RequestedFile_List is a list of CodeGeneratorRequest_RequestedFile. +type CodeGeneratorRequest_RequestedFile_List struct{ capnp.List } + +// NewCodeGeneratorRequest_RequestedFile creates a new list of CodeGeneratorRequest_RequestedFile. +func NewCodeGeneratorRequest_RequestedFile_List(s *capnp.Segment, sz int32) (CodeGeneratorRequest_RequestedFile_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}, sz) + return CodeGeneratorRequest_RequestedFile_List{l}, err +} + +func (s CodeGeneratorRequest_RequestedFile_List) At(i int) CodeGeneratorRequest_RequestedFile { + return CodeGeneratorRequest_RequestedFile{s.List.Struct(i)} +} + +func (s CodeGeneratorRequest_RequestedFile_List) Set(i int, v CodeGeneratorRequest_RequestedFile) error { + return s.List.SetStruct(i, v.Struct) +} + +// CodeGeneratorRequest_RequestedFile_Promise is a wrapper for a CodeGeneratorRequest_RequestedFile promised by a client call. 
+type CodeGeneratorRequest_RequestedFile_Promise struct{ *capnp.Pipeline } + +func (p CodeGeneratorRequest_RequestedFile_Promise) Struct() (CodeGeneratorRequest_RequestedFile, error) { + s, err := p.Pipeline.Struct() + return CodeGeneratorRequest_RequestedFile{s}, err +} + +type CodeGeneratorRequest_RequestedFile_Import struct{ capnp.Struct } + +// CodeGeneratorRequest_RequestedFile_Import_TypeID is the unique identifier for the type CodeGeneratorRequest_RequestedFile_Import. +const CodeGeneratorRequest_RequestedFile_Import_TypeID = 0xae504193122357e5 + +func NewCodeGeneratorRequest_RequestedFile_Import(s *capnp.Segment) (CodeGeneratorRequest_RequestedFile_Import, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}) + return CodeGeneratorRequest_RequestedFile_Import{st}, err +} + +func NewRootCodeGeneratorRequest_RequestedFile_Import(s *capnp.Segment) (CodeGeneratorRequest_RequestedFile_Import, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}) + return CodeGeneratorRequest_RequestedFile_Import{st}, err +} + +func ReadRootCodeGeneratorRequest_RequestedFile_Import(msg *capnp.Message) (CodeGeneratorRequest_RequestedFile_Import, error) { + root, err := msg.RootPtr() + return CodeGeneratorRequest_RequestedFile_Import{root.Struct()}, err +} + +func (s CodeGeneratorRequest_RequestedFile_Import) Id() uint64 { + return s.Struct.Uint64(0) +} + +func (s CodeGeneratorRequest_RequestedFile_Import) SetId(v uint64) { + s.Struct.SetUint64(0, v) +} + +func (s CodeGeneratorRequest_RequestedFile_Import) Name() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s CodeGeneratorRequest_RequestedFile_Import) HasName() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s CodeGeneratorRequest_RequestedFile_Import) NameBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s CodeGeneratorRequest_RequestedFile_Import) SetName(v string) error { + return s.Struct.SetText(0, v) +} + +// CodeGeneratorRequest_RequestedFile_Import_List is a list of CodeGeneratorRequest_RequestedFile_Import. +type CodeGeneratorRequest_RequestedFile_Import_List struct{ capnp.List } + +// NewCodeGeneratorRequest_RequestedFile_Import creates a new list of CodeGeneratorRequest_RequestedFile_Import. +func NewCodeGeneratorRequest_RequestedFile_Import_List(s *capnp.Segment, sz int32) (CodeGeneratorRequest_RequestedFile_Import_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}, sz) + return CodeGeneratorRequest_RequestedFile_Import_List{l}, err +} + +func (s CodeGeneratorRequest_RequestedFile_Import_List) At(i int) CodeGeneratorRequest_RequestedFile_Import { + return CodeGeneratorRequest_RequestedFile_Import{s.List.Struct(i)} +} + +func (s CodeGeneratorRequest_RequestedFile_Import_List) Set(i int, v CodeGeneratorRequest_RequestedFile_Import) error { + return s.List.SetStruct(i, v.Struct) +} + +// CodeGeneratorRequest_RequestedFile_Import_Promise is a wrapper for a CodeGeneratorRequest_RequestedFile_Import promised by a client call. 
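(Editorial sketch, not vendored content.) The CodeGeneratorRequest types above are what a capnp code-generator plugin receives on standard input: one framed message listing every schema node plus the files it should generate. A rough illustration of consuming it with the accessors above, written as if placed in the same package; the function name and placement are assumptions, and NewDecoder/Decode are defined in mem.go further down in this vendor drop.

package schema // hypothetical placement alongside the generated code above

import (
	"fmt"
	"io"

	capnp "zombiezen.com/go/capnproto2"
)

// readCodeGenRequest decodes a single framed CodeGeneratorRequest (as a
// plugin would read from stdin) and lists the files it is asked to generate.
func readCodeGenRequest(r io.Reader) error {
	msg, err := capnp.NewDecoder(r).Decode()
	if err != nil {
		return err
	}
	req, err := ReadRootCodeGeneratorRequest(msg)
	if err != nil {
		return err
	}
	files, err := req.RequestedFiles()
	if err != nil {
		return err
	}
	for i := 0; i < files.Len(); i++ {
		name, _ := files.At(i).Filename()
		fmt.Printf("requested file %#x: %s\n", files.At(i).Id(), name)
	}
	return nil
}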
+type CodeGeneratorRequest_RequestedFile_Import_Promise struct{ *capnp.Pipeline } + +func (p CodeGeneratorRequest_RequestedFile_Import_Promise) Struct() (CodeGeneratorRequest_RequestedFile_Import, error) { + s, err := p.Pipeline.Struct() + return CodeGeneratorRequest_RequestedFile_Import{s}, err +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/strquote/BUILD.bazel b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/strquote/BUILD.bazel new file mode 100644 index 00000000..39b4cf74 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/strquote/BUILD.bazel @@ -0,0 +1,7 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["strquote.go"], + visibility = ["//:__subpackages__"], +) diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/strquote/strquote.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/strquote/strquote.go new file mode 100644 index 00000000..f6af09b9 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/strquote/strquote.go @@ -0,0 +1,52 @@ +// Package strquote provides a function for formatting a string as a +// Cap'n Proto string literal. +package strquote + +// Append appends a Cap'n Proto string literal of s to buf. +func Append(buf []byte, s []byte) []byte { + buf = append(buf, '"') + last := 0 + for i, b := range s { + if !needsEscape(b) { + continue + } + buf = append(buf, s[last:i]...) + switch b { + case '\a': + buf = append(buf, '\\', 'a') + case '\b': + buf = append(buf, '\\', 'b') + case '\f': + buf = append(buf, '\\', 'f') + case '\n': + buf = append(buf, '\\', 'n') + case '\r': + buf = append(buf, '\\', 'r') + case '\t': + buf = append(buf, '\\', 't') + case '\v': + buf = append(buf, '\\', 'v') + case '\'': + buf = append(buf, '\\', '\'') + case '"': + buf = append(buf, '\\', '"') + case '\\': + buf = append(buf, '\\', '\\') + default: + buf = append(buf, '\\', 'x', hexDigit(b/16), hexDigit(b%16)) + } + last = i + 1 + } + buf = append(buf, s[last:]...) + buf = append(buf, '"') + return buf +} + +func needsEscape(b byte) bool { + return b < 0x20 || b >= 0x7f +} + +func hexDigit(b byte) byte { + const digits = "0123456789abcdef" + return digits[b] +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/list.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/list.go new file mode 100644 index 00000000..95a09492 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/list.go @@ -0,0 +1,1044 @@ +package capnp + +import ( + "errors" + "math" + "strconv" + + "zombiezen.com/go/capnproto2/internal/strquote" +) + +// A List is a reference to an array of values. +type List struct { + seg *Segment + off Address // at beginning of elements (past composite list tag word) + length int32 + size ObjectSize + depthLimit uint + flags listFlags +} + +// newPrimitiveList allocates a new list of primitive values, preferring placement in s. 
+func newPrimitiveList(s *Segment, sz Size, n int32) (List, error) { + total, ok := sz.times(n) + if !ok { + return List{}, errOverflow + } + s, addr, err := alloc(s, total) + if err != nil { + return List{}, err + } + return List{ + seg: s, + off: addr, + length: n, + size: ObjectSize{DataSize: sz}, + depthLimit: maxDepth, + }, nil +} + +// NewCompositeList creates a new composite list, preferring placement +// in s. +func NewCompositeList(s *Segment, sz ObjectSize, n int32) (List, error) { + if !sz.isValid() { + return List{}, errObjectSize + } + sz.DataSize = sz.DataSize.padToWord() + total, ok := sz.totalSize().times(n) + if !ok || total > maxSize-wordSize { + return List{}, errOverflow + } + s, addr, err := alloc(s, wordSize+total) + if err != nil { + return List{}, err + } + // Add tag word + s.writeRawPointer(addr, rawStructPointer(pointerOffset(n), sz)) + return List{ + seg: s, + off: addr + Address(wordSize), + length: n, + size: sz, + flags: isCompositeList, + depthLimit: maxDepth, + }, nil +} + +// ToList converts p to a List. +// +// Deprecated: Use Ptr.List. +func ToList(p Pointer) List { + return toPtr(p).List() +} + +// ToListDefault attempts to convert p into a list, reading the default +// value from def if p is not a list. +// +// Deprecated: Use Ptr.ListDefault. +func ToListDefault(p Pointer, def []byte) (List, error) { + return toPtr(p).ListDefault(def) +} + +// ToPtr converts the list to a generic pointer. +func (p List) ToPtr() Ptr { + return Ptr{ + seg: p.seg, + off: p.off, + lenOrCap: uint32(p.length), + size: p.size, + depthLimit: p.depthLimit, + flags: listPtrFlag(p.flags), + } +} + +// Segment returns the segment this pointer references. +func (p List) Segment() *Segment { + return p.seg +} + +// IsValid returns whether the list is valid. +func (p List) IsValid() bool { + return p.seg != nil +} + +// HasData reports whether the list's total size is non-zero. +func (p List) HasData() bool { + sz, ok := p.size.totalSize().times(p.length) + if !ok { + return false + } + return sz > 0 +} + +// readSize returns the list's size for the purposes of read limit +// accounting. +func (p List) readSize() Size { + if p.seg == nil { + return 0 + } + e := p.size.totalSize() + if e == 0 { + e = wordSize + } + sz, ok := e.times(p.length) + if !ok { + return maxSize + } + return sz +} + +// allocSize returns the list's size for the purpose of copying the list +// to a different message. +func (p List) allocSize() Size { + if p.seg == nil { + return 0 + } + if p.flags&isBitList != 0 { + return Size((p.length + 7) / 8) + } + sz, _ := p.size.totalSize().times(p.length) // size has already been validated + if p.flags&isCompositeList == 0 { + return sz + } + return sz + wordSize +} + +// raw returns the equivalent raw list pointer with a zero offset. 
+func (p List) raw() rawPointer { + if p.seg == nil { + return 0 + } + if p.flags&isCompositeList != 0 { + return rawListPointer(0, compositeList, p.length*p.size.totalWordCount()) + } + if p.flags&isBitList != 0 { + return rawListPointer(0, bit1List, p.length) + } + if p.size.PointerCount == 1 && p.size.DataSize == 0 { + return rawListPointer(0, pointerList, p.length) + } + if p.size.PointerCount != 0 { + panic(errListSize) + } + switch p.size.DataSize { + case 0: + return rawListPointer(0, voidList, p.length) + case 1: + return rawListPointer(0, byte1List, p.length) + case 2: + return rawListPointer(0, byte2List, p.length) + case 4: + return rawListPointer(0, byte4List, p.length) + case 8: + return rawListPointer(0, byte8List, p.length) + default: + panic(errListSize) + } +} + +func (p List) underlying() Pointer { + return p +} + +// Address returns the address the pointer references. +// +// Deprecated: The return value is not well-defined. Use SamePtr if you +// need to check whether two pointers refer to the same object. +func (p List) Address() Address { + return p.off +} + +// Len returns the length of the list. +func (p List) Len() int { + if p.seg == nil { + return 0 + } + return int(p.length) +} + +// primitiveElem returns the address of the segment data for a list element. +// Calling this on a bit list returns an error. +func (p List) primitiveElem(i int, expectedSize ObjectSize) (Address, error) { + if p.seg == nil || i < 0 || i >= int(p.length) { + // This is programmer error, not input error. + panic(errOutOfBounds) + } + if p.flags&isBitList != 0 || p.flags&isCompositeList == 0 && p.size != expectedSize || p.flags&isCompositeList != 0 && (p.size.DataSize < expectedSize.DataSize || p.size.PointerCount < expectedSize.PointerCount) { + return 0, errElementSize + } + addr, ok := p.off.element(int32(i), p.size.totalSize()) + if !ok { + return 0, errOverflow + } + return addr, nil +} + +// Struct returns the i'th element as a struct. +func (p List) Struct(i int) Struct { + if p.seg == nil || i < 0 || i >= int(p.length) { + // This is programmer error, not input error. + panic(errOutOfBounds) + } + if p.flags&isBitList != 0 { + return Struct{} + } + addr, ok := p.off.element(int32(i), p.size.totalSize()) + if !ok { + return Struct{} + } + return Struct{ + seg: p.seg, + off: addr, + size: p.size, + flags: isListMember, + depthLimit: p.depthLimit - 1, + } +} + +// SetStruct set the i'th element to the value in s. +func (p List) SetStruct(i int, s Struct) error { + if p.flags&isBitList != 0 { + return errBitListStruct + } + return copyStruct(p.Struct(i), s) +} + +// A BitList is a reference to a list of booleans. +type BitList struct{ List } + +// NewBitList creates a new bit list, preferring placement in s. +func NewBitList(s *Segment, n int32) (BitList, error) { + s, addr, err := alloc(s, Size(int64(n+7)/8)) + if err != nil { + return BitList{}, err + } + return BitList{List{ + seg: s, + off: addr, + length: n, + flags: isBitList, + depthLimit: maxDepth, + }}, nil +} + +// At returns the i'th bit. +func (p BitList) At(i int) bool { + if p.seg == nil || i < 0 || i >= int(p.length) { + // This is programmer error, not input error. + panic(errOutOfBounds) + } + if p.flags&isBitList == 0 { + return false + } + bit := BitOffset(i) + addr := p.off.addOffset(bit.offset()) + return p.seg.readUint8(addr)&bit.mask() != 0 +} + +// Set sets the i'th bit to v. 
+func (p BitList) Set(i int, v bool) { + if p.seg == nil || i < 0 || i >= int(p.length) { + // This is programmer error, not input error. + panic(errOutOfBounds) + } + if p.flags&isBitList == 0 { + // Again, programmer error. Should have used NewBitList. + panic(errElementSize) + } + bit := BitOffset(i) + addr := p.off.addOffset(bit.offset()) + b := p.seg.slice(addr, 1) + if v { + b[0] |= bit.mask() + } else { + b[0] &^= bit.mask() + } +} + +// String returns the list in Cap'n Proto schema format (e.g. "[true, false]"). +func (p BitList) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < p.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + if p.At(i) { + buf = append(buf, "true"...) + } else { + buf = append(buf, "false"...) + } + } + buf = append(buf, ']') + return string(buf) +} + +// A PointerList is a reference to an array of pointers. +type PointerList struct{ List } + +// NewPointerList allocates a new list of pointers, preferring placement in s. +func NewPointerList(s *Segment, n int32) (PointerList, error) { + total, ok := wordSize.times(n) + if !ok { + return PointerList{}, errOverflow + } + s, addr, err := alloc(s, total) + if err != nil { + return PointerList{}, err + } + return PointerList{List{ + seg: s, + off: addr, + length: n, + size: ObjectSize{PointerCount: 1}, + depthLimit: maxDepth, + }}, nil +} + +// At returns the i'th pointer in the list. +// +// Deprecated: Use PtrAt. +func (p PointerList) At(i int) (Pointer, error) { + pi, err := p.PtrAt(i) + return pi.toPointer(), err +} + +// PtrAt returns the i'th pointer in the list. +func (p PointerList) PtrAt(i int) (Ptr, error) { + addr, err := p.primitiveElem(i, ObjectSize{PointerCount: 1}) + if err != nil { + return Ptr{}, err + } + return p.seg.readPtr(addr, p.depthLimit) +} + +// Set sets the i'th pointer in the list to v. +// +// Deprecated: Use SetPtr. +func (p PointerList) Set(i int, v Pointer) error { + return p.SetPtr(i, toPtr(v)) +} + +// SetPtr sets the i'th pointer in the list to v. +func (p PointerList) SetPtr(i int, v Ptr) error { + addr, err := p.primitiveElem(i, ObjectSize{PointerCount: 1}) + if err != nil { + return err + } + return p.seg.writePtr(addr, v, false) +} + +// TextList is an array of pointers to strings. +type TextList struct{ List } + +// NewTextList allocates a new list of text pointers, preferring placement in s. +func NewTextList(s *Segment, n int32) (TextList, error) { + pl, err := NewPointerList(s, n) + if err != nil { + return TextList{}, err + } + return TextList{pl.List}, nil +} + +// At returns the i'th string in the list. +func (l TextList) At(i int) (string, error) { + addr, err := l.primitiveElem(i, ObjectSize{PointerCount: 1}) + if err != nil { + return "", err + } + p, err := l.seg.readPtr(addr, l.depthLimit) + if err != nil { + return "", err + } + return p.Text(), nil +} + +// BytesAt returns the i'th element in the list as a byte slice. +// The underlying array of the slice is the segment data. +func (l TextList) BytesAt(i int) ([]byte, error) { + addr, err := l.primitiveElem(i, ObjectSize{PointerCount: 1}) + if err != nil { + return nil, err + } + p, err := l.seg.readPtr(addr, l.depthLimit) + if err != nil { + return nil, err + } + return p.TextBytes(), nil +} + +// Set sets the i'th string in the list to v. 
+func (l TextList) Set(i int, v string) error { + addr, err := l.primitiveElem(i, ObjectSize{PointerCount: 1}) + if err != nil { + return err + } + if v == "" { + return l.seg.writePtr(addr, Ptr{}, false) + } + p, err := NewText(l.seg, v) + if err != nil { + return err + } + return l.seg.writePtr(addr, p.List.ToPtr(), false) +} + +// String returns the list in Cap'n Proto schema format (e.g. `["foo", "bar"]`). +func (l TextList) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + s, err := l.BytesAt(i) + if err != nil { + buf = append(buf, ""...) + continue + } + buf = strquote.Append(buf, s) + } + buf = append(buf, ']') + return string(buf) +} + +// DataList is an array of pointers to data. +type DataList struct{ List } + +// NewDataList allocates a new list of data pointers, preferring placement in s. +func NewDataList(s *Segment, n int32) (DataList, error) { + pl, err := NewPointerList(s, n) + if err != nil { + return DataList{}, err + } + return DataList{pl.List}, nil +} + +// At returns the i'th data in the list. +func (l DataList) At(i int) ([]byte, error) { + addr, err := l.primitiveElem(i, ObjectSize{PointerCount: 1}) + if err != nil { + return nil, err + } + p, err := l.seg.readPtr(addr, l.depthLimit) + if err != nil { + return nil, err + } + return p.Data(), nil +} + +// Set sets the i'th data in the list to v. +func (l DataList) Set(i int, v []byte) error { + addr, err := l.primitiveElem(i, ObjectSize{PointerCount: 1}) + if err != nil { + return err + } + if len(v) == 0 { + return l.seg.writePtr(addr, Ptr{}, false) + } + p, err := NewData(l.seg, v) + if err != nil { + return err + } + return l.seg.writePtr(addr, p.List.ToPtr(), false) +} + +// String returns the list in Cap'n Proto schema format (e.g. `["foo", "bar"]`). +func (l DataList) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + s, err := l.At(i) + if err != nil { + buf = append(buf, ""...) + continue + } + buf = strquote.Append(buf, s) + } + buf = append(buf, ']') + return string(buf) +} + +// A VoidList is a list of zero-sized elements. +type VoidList struct{ List } + +// NewVoidList creates a list of voids. No allocation is performed; +// s is only used for Segment()'s return value. +func NewVoidList(s *Segment, n int32) VoidList { + return VoidList{List{ + seg: s, + length: n, + depthLimit: maxDepth, + }} +} + +// String returns the list in Cap'n Proto schema format (e.g. "[void, void, void]"). +func (l VoidList) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = append(buf, "void"...) + } + buf = append(buf, ']') + return string(buf) +} + +// A UInt8List is an array of UInt8 values. +type UInt8List struct{ List } + +// NewUInt8List creates a new list of UInt8, preferring placement in s. +func NewUInt8List(s *Segment, n int32) (UInt8List, error) { + l, err := newPrimitiveList(s, 1, n) + if err != nil { + return UInt8List{}, err + } + return UInt8List{l}, nil +} + +// NewText creates a new list of UInt8 from a string. +func NewText(s *Segment, v string) (UInt8List, error) { + // TODO(light): error if v is too long + l, err := NewUInt8List(s, int32(len(v)+1)) + if err != nil { + return UInt8List{}, err + } + copy(l.seg.slice(l.off, Size(len(v))), v) + return l, nil +} + +// NewTextFromBytes creates a NUL-terminated list of UInt8 from a byte slice. 
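(Editorial sketch, not vendored content; error handling is abbreviated.) A small usage example for the list types above: allocate a message backed by a single expanding segment, build a two-element TextList, and read it back. Only the package clause and import alias are assumptions; NewMessage and SingleSegment are defined in mem.go later in this vendor drop.

package main

import (
	"fmt"
	"log"

	capnp "zombiezen.com/go/capnproto2"
)

func main() {
	// One expanding segment is enough for a small message.
	_, seg, err := capnp.NewMessage(capnp.SingleSegment(nil))
	if err != nil {
		log.Fatal(err)
	}
	names, err := capnp.NewTextList(seg, 2)
	if err != nil {
		log.Fatal(err)
	}
	// Set allocates the text in the message and points the i'th list
	// pointer at it; empty strings are stored as null pointers.
	if err := names.Set(0, "alpha"); err != nil {
		log.Fatal(err)
	}
	if err := names.Set(1, "beta"); err != nil {
		log.Fatal(err)
	}
	first, _ := names.At(0)
	fmt.Println(first, names.Len(), names.String()) // alpha 2 ["alpha", "beta"]
}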
+func NewTextFromBytes(s *Segment, v []byte) (UInt8List, error) { + // TODO(light): error if v is too long + l, err := NewUInt8List(s, int32(len(v)+1)) + if err != nil { + return UInt8List{}, err + } + copy(l.seg.slice(l.off, Size(len(v))), v) + return l, nil +} + +// NewData creates a new list of UInt8 from a byte slice. +func NewData(s *Segment, v []byte) (UInt8List, error) { + // TODO(light): error if v is too long + l, err := NewUInt8List(s, int32(len(v))) + if err != nil { + return UInt8List{}, err + } + copy(l.seg.slice(l.off, Size(len(v))), v) + return l, nil +} + +// ToText attempts to convert p into Text. +// +// Deprecated: Use Ptr.Text. +func ToText(p Pointer) string { + return toPtr(p).TextDefault("") +} + +// ToTextDefault attempts to convert p into Text, returning def on failure. +// +// Deprecated: Use Ptr.TextDefault. +func ToTextDefault(p Pointer, def string) string { + return toPtr(p).TextDefault(def) +} + +// ToData attempts to convert p into Data. +// +// Deprecated: Use Ptr.Data. +func ToData(p Pointer) []byte { + return toPtr(p).DataDefault(nil) +} + +// ToDataDefault attempts to convert p into Data, returning def on failure. +// +// Deprecated: Use Ptr.DataDefault. +func ToDataDefault(p Pointer, def []byte) []byte { + return toPtr(p).DataDefault(def) +} + +func isOneByteList(p Ptr) bool { + return p.seg != nil && p.flags.ptrType() == listPtrType && p.size.isOneByte() && p.flags.listFlags()&isCompositeList == 0 +} + +// At returns the i'th element. +func (l UInt8List) At(i int) uint8 { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 1}) + if err != nil { + return 0 + } + return l.seg.readUint8(addr) +} + +// Set sets the i'th element to v. +func (l UInt8List) Set(i int, v uint8) { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 1}) + if err != nil { + panic(err) + } + l.seg.writeUint8(addr, v) +} + +// String returns the list in Cap'n Proto schema format (e.g. "[1, 2, 3]"). +func (l UInt8List) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = strconv.AppendUint(buf, uint64(l.At(i)), 10) + } + buf = append(buf, ']') + return string(buf) +} + +// Int8List is an array of Int8 values. +type Int8List struct{ List } + +// NewInt8List creates a new list of Int8, preferring placement in s. +func NewInt8List(s *Segment, n int32) (Int8List, error) { + l, err := newPrimitiveList(s, 1, n) + if err != nil { + return Int8List{}, err + } + return Int8List{l}, nil +} + +// At returns the i'th element. +func (l Int8List) At(i int) int8 { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 1}) + if err != nil { + return 0 + } + return int8(l.seg.readUint8(addr)) +} + +// Set sets the i'th element to v. +func (l Int8List) Set(i int, v int8) { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 1}) + if err != nil { + panic(err) + } + l.seg.writeUint8(addr, uint8(v)) +} + +// String returns the list in Cap'n Proto schema format (e.g. "[1, 2, 3]"). +func (l Int8List) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = strconv.AppendInt(buf, int64(l.At(i)), 10) + } + buf = append(buf, ']') + return string(buf) +} + +// A UInt16List is an array of UInt16 values. +type UInt16List struct{ List } + +// NewUInt16List creates a new list of UInt16, preferring placement in s. 
+func NewUInt16List(s *Segment, n int32) (UInt16List, error) { + l, err := newPrimitiveList(s, 2, n) + if err != nil { + return UInt16List{}, err + } + return UInt16List{l}, nil +} + +// At returns the i'th element. +func (l UInt16List) At(i int) uint16 { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 2}) + if err != nil { + return 0 + } + return l.seg.readUint16(addr) +} + +// Set sets the i'th element to v. +func (l UInt16List) Set(i int, v uint16) { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 2}) + if err != nil { + panic(err) + } + l.seg.writeUint16(addr, v) +} + +// String returns the list in Cap'n Proto schema format (e.g. "[1, 2, 3]"). +func (l UInt16List) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = strconv.AppendUint(buf, uint64(l.At(i)), 10) + } + buf = append(buf, ']') + return string(buf) +} + +// Int16List is an array of Int16 values. +type Int16List struct{ List } + +// NewInt16List creates a new list of Int16, preferring placement in s. +func NewInt16List(s *Segment, n int32) (Int16List, error) { + l, err := newPrimitiveList(s, 2, n) + if err != nil { + return Int16List{}, err + } + return Int16List{l}, nil +} + +// At returns the i'th element. +func (l Int16List) At(i int) int16 { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 2}) + if err != nil { + return 0 + } + return int16(l.seg.readUint16(addr)) +} + +// Set sets the i'th element to v. +func (l Int16List) Set(i int, v int16) { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 2}) + if err != nil { + panic(err) + } + l.seg.writeUint16(addr, uint16(v)) +} + +// String returns the list in Cap'n Proto schema format (e.g. "[1, 2, 3]"). +func (l Int16List) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = strconv.AppendInt(buf, int64(l.At(i)), 10) + } + buf = append(buf, ']') + return string(buf) +} + +// UInt32List is an array of UInt32 values. +type UInt32List struct{ List } + +// NewUInt32List creates a new list of UInt32, preferring placement in s. +func NewUInt32List(s *Segment, n int32) (UInt32List, error) { + l, err := newPrimitiveList(s, 4, n) + if err != nil { + return UInt32List{}, err + } + return UInt32List{l}, nil +} + +// At returns the i'th element. +func (l UInt32List) At(i int) uint32 { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 4}) + if err != nil { + return 0 + } + return l.seg.readUint32(addr) +} + +// Set sets the i'th element to v. +func (l UInt32List) Set(i int, v uint32) { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 4}) + if err != nil { + panic(err) + } + l.seg.writeUint32(addr, v) +} + +// String returns the list in Cap'n Proto schema format (e.g. "[1, 2, 3]"). +func (l UInt32List) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = strconv.AppendUint(buf, uint64(l.At(i)), 10) + } + buf = append(buf, ']') + return string(buf) +} + +// Int32List is an array of Int32 values. +type Int32List struct{ List } + +// NewInt32List creates a new list of Int32, preferring placement in s. +func NewInt32List(s *Segment, n int32) (Int32List, error) { + l, err := newPrimitiveList(s, 4, n) + if err != nil { + return Int32List{}, err + } + return Int32List{l}, nil +} + +// At returns the i'th element. 
+func (l Int32List) At(i int) int32 { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 4}) + if err != nil { + return 0 + } + return int32(l.seg.readUint32(addr)) +} + +// Set sets the i'th element to v. +func (l Int32List) Set(i int, v int32) { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 4}) + if err != nil { + panic(err) + } + l.seg.writeUint32(addr, uint32(v)) +} + +// String returns the list in Cap'n Proto schema format (e.g. "[1, 2, 3]"). +func (l Int32List) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = strconv.AppendInt(buf, int64(l.At(i)), 10) + } + buf = append(buf, ']') + return string(buf) +} + +// UInt64List is an array of UInt64 values. +type UInt64List struct{ List } + +// NewUInt64List creates a new list of UInt64, preferring placement in s. +func NewUInt64List(s *Segment, n int32) (UInt64List, error) { + l, err := newPrimitiveList(s, 8, n) + if err != nil { + return UInt64List{}, err + } + return UInt64List{l}, nil +} + +// At returns the i'th element. +func (l UInt64List) At(i int) uint64 { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 8}) + if err != nil { + return 0 + } + return l.seg.readUint64(addr) +} + +// Set sets the i'th element to v. +func (l UInt64List) Set(i int, v uint64) { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 8}) + if err != nil { + panic(err) + } + l.seg.writeUint64(addr, v) +} + +// String returns the list in Cap'n Proto schema format (e.g. "[1, 2, 3]"). +func (l UInt64List) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = strconv.AppendUint(buf, l.At(i), 10) + } + buf = append(buf, ']') + return string(buf) +} + +// Int64List is an array of Int64 values. +type Int64List struct{ List } + +// NewInt64List creates a new list of Int64, preferring placement in s. +func NewInt64List(s *Segment, n int32) (Int64List, error) { + l, err := newPrimitiveList(s, 8, n) + if err != nil { + return Int64List{}, err + } + return Int64List{l}, nil +} + +// At returns the i'th element. +func (l Int64List) At(i int) int64 { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 8}) + if err != nil { + return 0 + } + return int64(l.seg.readUint64(addr)) +} + +// Set sets the i'th element to v. +func (l Int64List) Set(i int, v int64) { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 8}) + if err != nil { + panic(err) + } + l.seg.writeUint64(addr, uint64(v)) +} + +// String returns the list in Cap'n Proto schema format (e.g. "[1, 2, 3]"). +func (l Int64List) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = strconv.AppendInt(buf, l.At(i), 10) + } + buf = append(buf, ']') + return string(buf) +} + +// Float32List is an array of Float32 values. +type Float32List struct{ List } + +// NewFloat32List creates a new list of Float32, preferring placement in s. +func NewFloat32List(s *Segment, n int32) (Float32List, error) { + l, err := newPrimitiveList(s, 4, n) + if err != nil { + return Float32List{}, err + } + return Float32List{l}, nil +} + +// At returns the i'th element. +func (l Float32List) At(i int) float32 { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 4}) + if err != nil { + return 0 + } + return math.Float32frombits(l.seg.readUint32(addr)) +} + +// Set sets the i'th element to v. 
+func (l Float32List) Set(i int, v float32) { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 4}) + if err != nil { + panic(err) + } + l.seg.writeUint32(addr, math.Float32bits(v)) +} + +// String returns the list in Cap'n Proto schema format (e.g. "[1, 2, 3]"). +func (l Float32List) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = strconv.AppendFloat(buf, float64(l.At(i)), 'g', -1, 32) + } + buf = append(buf, ']') + return string(buf) +} + +// Float64List is an array of Float64 values. +type Float64List struct{ List } + +// NewFloat64List creates a new list of Float64, preferring placement in s. +func NewFloat64List(s *Segment, n int32) (Float64List, error) { + l, err := newPrimitiveList(s, 8, n) + if err != nil { + return Float64List{}, err + } + return Float64List{l}, nil +} + +// At returns the i'th element. +func (l Float64List) At(i int) float64 { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 8}) + if err != nil { + return 0 + } + return math.Float64frombits(l.seg.readUint64(addr)) +} + +// Set sets the i'th element to v. +func (l Float64List) Set(i int, v float64) { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 8}) + if err != nil { + panic(err) + } + l.seg.writeUint64(addr, math.Float64bits(v)) +} + +// String returns the list in Cap'n Proto schema format (e.g. "[1, 2, 3]"). +func (l Float64List) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = strconv.AppendFloat(buf, l.At(i), 'g', -1, 64) + } + buf = append(buf, ']') + return string(buf) +} + +type listFlags uint8 + +const ( + isCompositeList listFlags = 1 << iota + isBitList +) + +var errBitListStruct = errors.New("capnp: SetStruct called on bit list") diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/mem.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/mem.go new file mode 100644 index 00000000..e3ee869a --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/mem.go @@ -0,0 +1,913 @@ +package capnp + +import ( + "bufio" + "encoding/binary" + "errors" + "fmt" + "io" + "sync" + + "zombiezen.com/go/capnproto2/internal/packed" +) + +// Security limits. Matches C++ implementation. +const ( + defaultTraverseLimit = 64 << 20 // 64 MiB + defaultDepthLimit = 64 + + maxStreamSegments = 512 + + defaultDecodeLimit = 64 << 20 // 64 MiB +) + +const maxDepth = ^uint(0) + +// A Message is a tree of Cap'n Proto objects, split into one or more +// segments of contiguous memory. The only required field is Arena. +// A Message is safe to read from multiple goroutines. +type Message struct { + // rlimit must be first so that it is 64-bit aligned. + // See sync/atomic docs. + rlimit ReadLimiter + rlimitInit sync.Once + + Arena Arena + + // CapTable is the indexed list of the clients referenced in the + // message. Capability pointers inside the message will use this table + // to map pointers to Clients. The table is usually populated by the + // RPC system. + // + // See https://capnproto.org/encoding.html#capabilities-interfaces for + // more details on the capability table. + CapTable []Client + + // TraverseLimit limits how many total bytes of data are allowed to be + // traversed while reading. Traversal is counted when a Struct or + // List is obtained. 
This means that calling a getter for the same + // sub-struct multiple times will cause it to be double-counted. Once + // the traversal limit is reached, pointer accessors will report + // errors. See https://capnproto.org/encoding.html#amplification-attack + // for more details on this security measure. + // + // If not set, this defaults to 64 MiB. + TraverseLimit uint64 + + // DepthLimit limits how deeply-nested a message structure can be. + // If not set, this defaults to 64. + DepthLimit uint + + // mu protects the following fields: + mu sync.Mutex + segs map[SegmentID]*Segment + firstSeg Segment // Preallocated first segment. msg is non-nil once initialized. +} + +// NewMessage creates a message with a new root and returns the first +// segment. It is an error to call NewMessage on an arena with data in it. +func NewMessage(arena Arena) (msg *Message, first *Segment, err error) { + msg = &Message{Arena: arena} + switch arena.NumSegments() { + case 0: + first, err = msg.allocSegment(wordSize) + if err != nil { + return nil, nil, err + } + case 1: + first, err = msg.Segment(0) + if err != nil { + return nil, nil, err + } + if len(first.data) > 0 { + return nil, nil, errHasData + } + default: + return nil, nil, errHasData + } + if first.ID() != 0 { + return nil, nil, errors.New("capnp: arena allocated first segment with non-zero ID") + } + seg, _, err := alloc(first, wordSize) // allocate root + if err != nil { + return nil, nil, err + } + if seg != first { + return nil, nil, errors.New("capnp: arena didn't allocate first word in first segment") + } + return msg, first, nil +} + +// Reset resets a message to use a different arena, allowing a single +// Message to be reused for reading multiple messages. This invalidates +// any existing pointers in the Message, so use with caution. +func (m *Message) Reset(arena Arena) { + m.mu.Lock() + m.Arena = arena + m.CapTable = nil + m.segs = nil + m.firstSeg = Segment{} + m.mu.Unlock() + if m.TraverseLimit == 0 { + m.ReadLimiter().Reset(defaultTraverseLimit) + } else { + m.ReadLimiter().Reset(m.TraverseLimit) + } +} + +// Root returns the pointer to the message's root object. +// +// Deprecated: Use RootPtr. +func (m *Message) Root() (Pointer, error) { + p, err := m.RootPtr() + return p.toPointer(), err +} + +// RootPtr returns the pointer to the message's root object. +func (m *Message) RootPtr() (Ptr, error) { + s, err := m.Segment(0) + if err != nil { + return Ptr{}, err + } + return s.root().PtrAt(0) +} + +// SetRoot sets the message's root object to p. +// +// Deprecated: Use SetRootPtr. +func (m *Message) SetRoot(p Pointer) error { + return m.SetRootPtr(toPtr(p)) +} + +// SetRootPtr sets the message's root object to p. +func (m *Message) SetRootPtr(p Ptr) error { + s, err := m.Segment(0) + if err != nil { + return err + } + return s.root().SetPtr(0, p) +} + +// AddCap appends a capability to the message's capability table and +// returns its ID. +func (m *Message) AddCap(c Client) CapabilityID { + n := CapabilityID(len(m.CapTable)) + m.CapTable = append(m.CapTable, c) + return n +} + +// ReadLimiter returns the message's read limiter. Useful if you want +// to reset the traversal limit while reading. 
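(Editorial sketch, not vendored content.) The TraverseLimit and ReadLimiter machinery described above matters when the same message is walked more than once, because traversal is counted per access. A minimal illustration; the package clause and function name are assumptions, and Unmarshal is defined later in this file.

package example // illustrative only

import (
	capnp "zombiezen.com/go/capnproto2"
)

// readTwice decodes data and walks it in two passes, resetting the read
// limiter in between so the second pass is not charged against the first.
func readTwice(data []byte) error {
	msg, err := capnp.Unmarshal(data)
	if err != nil {
		return err
	}
	// Takes effect when the limiter is first initialized, i.e. before the
	// first pointer access; the default is 64 MiB.
	msg.TraverseLimit = 256 << 20
	if _, err := msg.RootPtr(); err != nil {
		return err
	}
	// Re-reading the same structs is double-counted, so reset the limiter
	// before a second full pass.
	msg.ReadLimiter().Reset(256 << 20)
	_, err = msg.RootPtr()
	return err
}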
+func (m *Message) ReadLimiter() *ReadLimiter { + m.rlimitInit.Do(func() { + if m.TraverseLimit == 0 { + m.rlimit.limit = defaultTraverseLimit + } else { + m.rlimit.limit = m.TraverseLimit + } + }) + return &m.rlimit +} + +func (m *Message) depthLimit() uint { + if m.DepthLimit != 0 { + return m.DepthLimit + } + return defaultDepthLimit +} + +// NumSegments returns the number of segments in the message. +func (m *Message) NumSegments() int64 { + return int64(m.Arena.NumSegments()) +} + +// Segment returns the segment with the given ID. +func (m *Message) Segment(id SegmentID) (*Segment, error) { + if isInt32Bit && id > maxInt32 { + return nil, errSegment32Bit + } + if int64(id) >= m.Arena.NumSegments() { + return nil, errSegmentOutOfBounds + } + m.mu.Lock() + if seg := m.segment(id); seg != nil { + m.mu.Unlock() + return seg, nil + } + data, err := m.Arena.Data(id) + if err != nil { + m.mu.Unlock() + return nil, err + } + seg := m.setSegment(id, data) + m.mu.Unlock() + return seg, nil +} + +// segment returns the segment with the given ID. +// The caller must be holding m.mu. +func (m *Message) segment(id SegmentID) *Segment { + if m.segs == nil { + if id == 0 && m.firstSeg.msg != nil { + return &m.firstSeg + } + return nil + } + return m.segs[id] +} + +// setSegment creates or updates the Segment with the given ID. +// The caller must be holding m.mu. +func (m *Message) setSegment(id SegmentID, data []byte) *Segment { + if m.segs == nil { + if id == 0 { + m.firstSeg = Segment{ + id: id, + msg: m, + data: data, + } + return &m.firstSeg + } + m.segs = make(map[SegmentID]*Segment) + if m.firstSeg.msg != nil { + m.segs[0] = &m.firstSeg + } + } else if seg := m.segs[id]; seg != nil { + seg.data = data + return seg + } + seg := &Segment{ + id: id, + msg: m, + data: data, + } + m.segs[id] = seg + return seg +} + +// allocSegment creates or resizes an existing segment such that +// cap(seg.Data) - len(seg.Data) >= sz. +func (m *Message) allocSegment(sz Size) (*Segment, error) { + m.mu.Lock() + if m.segs == nil && m.firstSeg.msg != nil { + m.segs = make(map[SegmentID]*Segment) + m.segs[0] = &m.firstSeg + } + id, data, err := m.Arena.Allocate(sz, m.segs) + if err != nil { + m.mu.Unlock() + return nil, err + } + if isInt32Bit && id > maxInt32 { + m.mu.Unlock() + return nil, errSegment32Bit + } + seg := m.setSegment(id, data) + m.mu.Unlock() + return seg, nil +} + +// alloc allocates sz zero-filled bytes. It prefers using s, but may +// use a different segment in the same message if there's not sufficient +// capacity. +func alloc(s *Segment, sz Size) (*Segment, Address, error) { + sz = sz.padToWord() + if sz > maxSize-wordSize { + return nil, 0, errOverflow + } + + if !hasCapacity(s.data, sz) { + var err error + s, err = s.msg.allocSegment(sz) + if err != nil { + return nil, 0, err + } + } + + addr := Address(len(s.data)) + end, ok := addr.addSize(sz) + if !ok { + return nil, 0, errOverflow + } + space := s.data[len(s.data):end] + s.data = s.data[:end] + for i := range space { + space[i] = 0 + } + return s, addr, nil +} + +// An Arena loads and allocates segments for a Message. +type Arena interface { + // NumSegments returns the number of segments in the arena. + // This must not be larger than 1<<32. + NumSegments() int64 + + // Data loads the data for the segment with the given ID. IDs are in + // the range [0, NumSegments()). + // must be tightly packed in the range [0, NumSegments()). 
+ Data(id SegmentID) ([]byte, error) + + // Allocate selects a segment to place a new object in, creating a + // segment or growing the capacity of a previously loaded segment if + // necessary. If Allocate does not return an error, then the + // difference of the capacity and the length of the returned slice + // must be at least minsz. segs is a map of segment slices returned + // by the Data method keyed by ID (although the length of these slices + // may have changed by previous allocations). Allocate must not + // modify segs. + // + // If Allocate creates a new segment, the ID must be one larger than + // the last segment's ID or zero if it is the first segment. + // + // If Allocate returns an previously loaded segment's ID, then the + // arena is responsible for preserving the existing data in the + // returned byte slice. + Allocate(minsz Size, segs map[SegmentID]*Segment) (SegmentID, []byte, error) +} + +type singleSegmentArena []byte + +// SingleSegment returns a new arena with an expanding single-segment +// buffer. b can be used to populate the segment for reading or to +// reserve memory of a specific size. A SingleSegment arena does not +// return errors unless you attempt to access another segment. +func SingleSegment(b []byte) Arena { + ssa := new(singleSegmentArena) + *ssa = b + return ssa +} + +func (ssa *singleSegmentArena) NumSegments() int64 { + return 1 +} + +func (ssa *singleSegmentArena) Data(id SegmentID) ([]byte, error) { + if id != 0 { + return nil, errSegmentOutOfBounds + } + return *ssa, nil +} + +func (ssa *singleSegmentArena) Allocate(sz Size, segs map[SegmentID]*Segment) (SegmentID, []byte, error) { + data := []byte(*ssa) + if segs[0] != nil { + data = segs[0].data + } + if len(data)%int(wordSize) != 0 { + return 0, nil, errors.New("capnp: segment size is not a multiple of word size") + } + if hasCapacity(data, sz) { + return 0, data, nil + } + inc, err := nextAlloc(int64(len(data)), int64(maxSegmentSize()), sz) + if err != nil { + return 0, nil, fmt.Errorf("capnp: alloc %d bytes: %v", sz, err) + } + buf := make([]byte, len(data), cap(data)+inc) + copy(buf, data) + *ssa = buf + return 0, *ssa, nil +} + +type roSingleSegment []byte + +func (ss roSingleSegment) NumSegments() int64 { + return 1 +} + +func (ss roSingleSegment) Data(id SegmentID) ([]byte, error) { + if id != 0 { + return nil, errSegmentOutOfBounds + } + return ss, nil +} + +func (ss roSingleSegment) Allocate(sz Size, segs map[SegmentID]*Segment) (SegmentID, []byte, error) { + return 0, nil, errors.New("capnp: segment is read-only") +} + +type multiSegmentArena [][]byte + +// MultiSegment returns a new arena that allocates new segments when +// they are full. b can be used to populate the buffer for reading or +// to reserve memory of a specific size. +func MultiSegment(b [][]byte) Arena { + msa := new(multiSegmentArena) + *msa = b + return msa +} + +// demuxArena slices b into a multi-segment arena. 
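(Editorial sketch, not vendored content.) The two built-in Arena implementations above differ only in how they satisfy Allocate: SingleSegment grows one buffer in place, while MultiSegment appends a new segment once the existing ones are full; either can be seeded with a byte slice to reserve capacity or to read an existing message. A short illustration (package clause and function name are assumptions).

package example // illustrative only

import (
	capnp "zombiezen.com/go/capnproto2"
)

func newMessages() error {
	// Single segment; the zero-length, 1 KiB-capacity slice just reserves room.
	if _, _, err := capnp.NewMessage(capnp.SingleSegment(make([]byte, 0, 1024))); err != nil {
		return err
	}
	// Multi-segment arena: further segments are added as allocations outgrow
	// the current ones.
	_, _, err := capnp.NewMessage(capnp.MultiSegment(nil))
	return err
}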
+func demuxArena(hdr streamHeader, data []byte) (Arena, error) { + segs := make([][]byte, int(hdr.maxSegment())+1) + for i := range segs { + sz, err := hdr.segmentSize(uint32(i)) + if err != nil { + return nil, err + } + segs[i], data = data[:sz:sz], data[sz:] + } + return MultiSegment(segs), nil +} + +func (msa *multiSegmentArena) NumSegments() int64 { + return int64(len(*msa)) +} + +func (msa *multiSegmentArena) Data(id SegmentID) ([]byte, error) { + if int64(id) >= int64(len(*msa)) { + return nil, errSegmentOutOfBounds + } + return (*msa)[id], nil +} + +func (msa *multiSegmentArena) Allocate(sz Size, segs map[SegmentID]*Segment) (SegmentID, []byte, error) { + var total int64 + for i, data := range *msa { + id := SegmentID(i) + if s := segs[id]; s != nil { + data = s.data + } + if hasCapacity(data, sz) { + return id, data, nil + } + total += int64(cap(data)) + if total < 0 { + // Overflow. + return 0, nil, fmt.Errorf("capnp: alloc %d bytes: message too large", sz) + } + } + n, err := nextAlloc(total, 1<<63-1, sz) + if err != nil { + return 0, nil, fmt.Errorf("capnp: alloc %d bytes: %v", sz, err) + } + buf := make([]byte, 0, n) + id := SegmentID(len(*msa)) + *msa = append(*msa, buf) + return id, buf, nil +} + +// nextAlloc computes how much more space to allocate given the number +// of bytes allocated in the entire message and the requested number of +// bytes. It will always return a multiple of wordSize. max must be a +// multiple of wordSize. The sum of curr and the returned size will +// always be less than max. +func nextAlloc(curr, max int64, req Size) (int, error) { + if req == 0 { + return 0, nil + } + maxinc := int64(1<<32 - 8) // largest word-aligned Size + if isInt32Bit { + maxinc = 1<<31 - 8 // largest word-aligned int + } + if int64(req) > maxinc { + return 0, errors.New("allocation too large") + } + req = req.padToWord() + want := curr + int64(req) + if want <= curr || want > max { + return 0, errors.New("allocation overflows message size") + } + new := curr + double := new + new + switch { + case want < 1024: + next := (1024 - curr + 7) &^ 7 + if next < curr { + return int((curr + 7) &^ 7), nil + } + return int(next), nil + case want > double: + return int(req), nil + default: + for 0 < new && new < want { + new += new / 4 + } + if new <= 0 { + return int(req), nil + } + delta := new - curr + if delta > maxinc { + return int(maxinc), nil + } + return int((delta + 7) &^ 7), nil + } +} + +// A Decoder represents a framer that deserializes a particular Cap'n +// Proto input stream. +type Decoder struct { + r io.Reader + + segbuf [msgHeaderSize]byte + hdrbuf []byte + + reuse bool + buf []byte + msg Message + arena roSingleSegment + + // Maximum number of bytes that can be read per call to Decode. + // If not set, a reasonable default is used. + MaxMessageSize uint64 +} + +// NewDecoder creates a new Cap'n Proto framer that reads from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +// NewPackedDecoder creates a new Cap'n Proto framer that reads from a +// packed stream r. +func NewPackedDecoder(r io.Reader) *Decoder { + return NewDecoder(packed.NewReader(bufio.NewReader(r))) +} + +// Decode reads a message from the decoder stream. 
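+//
+// A minimal usage sketch (editor's addition; assumes r is an io.Reader
+// carrying framed, unpacked messages and that error handling fits the
+// surrounding function):
+//
+//	dec := NewDecoder(r)
+//	for {
+//		msg, err := dec.Decode()
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		root, err := msg.RootPtr()
+//		if err != nil {
+//			return err
+//		}
+//		_ = root // walk the message starting from its root pointer
+//	}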
+func (d *Decoder) Decode() (*Message, error) { + maxSize := d.MaxMessageSize + if maxSize == 0 { + maxSize = defaultDecodeLimit + } + if _, err := io.ReadFull(d.r, d.segbuf[:]); err != nil { + return nil, err + } + maxSeg := binary.LittleEndian.Uint32(d.segbuf[:]) + if maxSeg > maxStreamSegments { + return nil, errTooManySegments + } + hdrSize := streamHeaderSize(maxSeg) + if hdrSize > maxSize || hdrSize > (1<<31-1) { + return nil, errDecodeLimit + } + d.hdrbuf = resizeSlice(d.hdrbuf, int(hdrSize)) + copy(d.hdrbuf, d.segbuf[:]) + if _, err := io.ReadFull(d.r, d.hdrbuf[msgHeaderSize:]); err != nil { + return nil, err + } + hdr, _, err := parseStreamHeader(d.hdrbuf) + if err != nil { + return nil, err + } + total, err := hdr.totalSize() + if err != nil { + return nil, err + } + // TODO(someday): if total size is greater than can fit in one buffer, + // attempt to allocate buffer per segment. + if total > maxSize-hdrSize || total > (1<<31-1) { + return nil, errDecodeLimit + } + if !d.reuse { + buf := make([]byte, int(total)) + if _, err := io.ReadFull(d.r, buf); err != nil { + return nil, err + } + arena, err := demuxArena(hdr, buf) + if err != nil { + return nil, err + } + return &Message{Arena: arena}, nil + } + d.buf = resizeSlice(d.buf, int(total)) + if _, err := io.ReadFull(d.r, d.buf); err != nil { + return nil, err + } + var arena Arena + if hdr.maxSegment() == 0 { + d.arena = d.buf[:len(d.buf):len(d.buf)] + arena = &d.arena + } else { + var err error + arena, err = demuxArena(hdr, d.buf) + if err != nil { + return nil, err + } + } + d.msg.Reset(arena) + return &d.msg, nil +} + +func resizeSlice(b []byte, size int) []byte { + if cap(b) < size { + return make([]byte, size) + } + return b[:size] +} + +// ReuseBuffer causes the decoder to reuse its buffer on subsequent decodes. +// The decoder may return messages that cannot handle allocations. +func (d *Decoder) ReuseBuffer() { + d.reuse = true +} + +// Unmarshal reads an unpacked serialized stream into a message. No +// copying is performed, so the objects in the returned message read +// directly from data. +func Unmarshal(data []byte) (*Message, error) { + if len(data) == 0 { + return nil, io.EOF + } + hdr, data, err := parseStreamHeader(data) + if err != nil { + return nil, err + } + if tot, err := hdr.totalSize(); err != nil { + return nil, err + } else if tot > uint64(len(data)) { + return nil, io.ErrUnexpectedEOF + } + arena, err := demuxArena(hdr, data) + if err != nil { + return nil, err + } + return &Message{Arena: arena}, nil +} + +// UnmarshalPacked reads a packed serialized stream into a message. +func UnmarshalPacked(data []byte) (*Message, error) { + if len(data) == 0 { + return nil, io.EOF + } + data, err := packed.Unpack(nil, data) + if err != nil { + return nil, err + } + return Unmarshal(data) +} + +// MustUnmarshalRoot reads an unpacked serialized stream and returns +// its root pointer. If there is any error, it panics. +// +// Deprecated: Use MustUnmarshalRootPtr. +func MustUnmarshalRoot(data []byte) Pointer { + msg, err := Unmarshal(data) + if err != nil { + panic(err) + } + p, err := msg.Root() + if err != nil { + panic(err) + } + return p +} + +// MustUnmarshalRootPtr reads an unpacked serialized stream and returns +// its root pointer. If there is any error, it panics. 
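+//
+// For example (editor's sketch; msg is assumed to be a previously built
+// *Message):
+//
+//	data, err := msg.Marshal()
+//	if err != nil {
+//		return err
+//	}
+//	root := MustUnmarshalRootPtr(data)
+//	_ = root.Struct() // interpret the root as a struct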
+func MustUnmarshalRootPtr(data []byte) Ptr { + msg, err := Unmarshal(data) + if err != nil { + panic(err) + } + p, err := msg.RootPtr() + if err != nil { + panic(err) + } + return p +} + +// An Encoder represents a framer for serializing a particular Cap'n +// Proto stream. +type Encoder struct { + w io.Writer + hdrbuf []byte + bufs [][]byte + + packed bool + packbuf []byte +} + +// NewEncoder creates a new Cap'n Proto framer that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w} +} + +// NewPackedEncoder creates a new Cap'n Proto framer that writes to a +// packed stream w. +func NewPackedEncoder(w io.Writer) *Encoder { + return &Encoder{w: w, packed: true} +} + +// Encode writes a message to the encoder stream. +func (e *Encoder) Encode(m *Message) error { + nsegs := m.NumSegments() + if nsegs == 0 { + return errMessageEmpty + } + e.bufs = append(e.bufs[:0], nil) // first element is placeholder for header + maxSeg := uint32(nsegs - 1) + hdrSize := streamHeaderSize(maxSeg) + if uint64(cap(e.hdrbuf)) < hdrSize { + e.hdrbuf = make([]byte, 0, hdrSize) + } + e.hdrbuf = appendUint32(e.hdrbuf[:0], maxSeg) + for i := int64(0); i < nsegs; i++ { + s, err := m.Segment(SegmentID(i)) + if err != nil { + return err + } + n := len(s.data) + if int64(n) > int64(maxSize) { + return errSegmentTooLarge + } + e.hdrbuf = appendUint32(e.hdrbuf, uint32(Size(n)/wordSize)) + e.bufs = append(e.bufs, s.data) + } + if len(e.hdrbuf)%int(wordSize) != 0 { + e.hdrbuf = appendUint32(e.hdrbuf, 0) + } + e.bufs[0] = e.hdrbuf + if e.packed { + return e.writePacked(e.bufs) + } + return e.write(e.bufs) +} + +func (e *Encoder) writePacked(bufs [][]byte) error { + for _, b := range bufs { + e.packbuf = packed.Pack(e.packbuf[:0], b) + if _, err := e.w.Write(e.packbuf); err != nil { + return err + } + } + return nil +} + +func (m *Message) segmentSizes() ([]Size, error) { + nsegs := m.NumSegments() + sizes := make([]Size, nsegs) + for i := int64(0); i < nsegs; i++ { + s, err := m.Segment(SegmentID(i)) + if err != nil { + return sizes[:i], err + } + n := len(s.data) + if int64(n) > int64(maxSize) { + return sizes[:i], errSegmentTooLarge + } + sizes[i] = Size(n) + } + return sizes, nil +} + +// Marshal concatenates the segments in the message into a single byte +// slice including framing. +func (m *Message) Marshal() ([]byte, error) { + // Compute buffer size. + // TODO(light): error out if too many segments + nsegs := m.NumSegments() + if nsegs == 0 { + return nil, errMessageEmpty + } + maxSeg := uint32(nsegs - 1) + hdrSize := streamHeaderSize(maxSeg) + sizes, err := m.segmentSizes() + if err != nil { + return nil, err + } + // TODO(light): error out if too large + total := uint64(hdrSize) + totalSize(sizes) + + // Fill in buffer. + buf := make([]byte, hdrSize, total) + // TODO: remove marshalStreamHeader and inline. + marshalStreamHeader(buf, sizes) + for i := int64(0); i < nsegs; i++ { + s, err := m.Segment(SegmentID(i)) + if err != nil { + return nil, err + } + buf = append(buf, s.data...) + } + return buf, nil +} + +// MarshalPacked marshals the message in packed form. +func (m *Message) MarshalPacked() ([]byte, error) { + data, err := m.Marshal() + if err != nil { + return nil, err + } + buf := make([]byte, 0, len(data)) + buf = packed.Pack(buf, data) + return buf, nil +} + +// Stream header sizes. +const ( + msgHeaderSize = 4 + segHeaderSize = 4 +) + +// streamHeaderSize returns the size of the header, given the +// first 32-bit number. 
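+//
+// That number is the index of the highest segment, so a two-segment message
+// has n = 1 and a header of (4 + 4*2 + 7) &^ 7 = 16 bytes: the 4-byte
+// segment count, two 4-byte segment sizes, and 4 bytes of padding up to a
+// word boundary (editor's worked example).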
+func streamHeaderSize(n uint32) uint64 { + return (msgHeaderSize + segHeaderSize*(uint64(n)+1) + 7) &^ 7 +} + +// marshalStreamHeader marshals the sizes into the byte slice, which +// must be of size streamHeaderSize(len(sizes) - 1). +// +// TODO: remove marshalStreamHeader and inline. +func marshalStreamHeader(b []byte, sizes []Size) { + binary.LittleEndian.PutUint32(b, uint32(len(sizes)-1)) + for i, sz := range sizes { + loc := msgHeaderSize + i*segHeaderSize + binary.LittleEndian.PutUint32(b[loc:], uint32(sz/Size(wordSize))) + } +} + +// appendUint32 appends a uint32 to a byte slice and returns the +// new slice. +func appendUint32(b []byte, v uint32) []byte { + b = append(b, 0, 0, 0, 0) + binary.LittleEndian.PutUint32(b[len(b)-4:], v) + return b +} + +type streamHeader struct { + b []byte +} + +// parseStreamHeader parses the header of the stream framing format. +func parseStreamHeader(data []byte) (h streamHeader, tail []byte, err error) { + if uint64(len(data)) < streamHeaderSize(0) { + return streamHeader{}, nil, io.ErrUnexpectedEOF + } + maxSeg := binary.LittleEndian.Uint32(data) + // TODO(light): check int + hdrSize := streamHeaderSize(maxSeg) + if uint64(len(data)) < hdrSize { + return streamHeader{}, nil, io.ErrUnexpectedEOF + } + return streamHeader{b: data}, data[hdrSize:], nil +} + +func (h streamHeader) maxSegment() uint32 { + return binary.LittleEndian.Uint32(h.b) +} + +func (h streamHeader) segmentSize(i uint32) (Size, error) { + s := binary.LittleEndian.Uint32(h.b[msgHeaderSize+i*segHeaderSize:]) + sz, ok := wordSize.times(int32(s)) + if !ok { + return 0, errSegmentTooLarge + } + return sz, nil +} + +func (h streamHeader) totalSize() (uint64, error) { + var sum uint64 + for i := uint64(0); i <= uint64(h.maxSegment()); i++ { + x, err := h.segmentSize(uint32(i)) + if err != nil { + return sum, err + } + sum += uint64(x) + } + return sum, nil +} + +func hasCapacity(b []byte, sz Size) bool { + return sz <= Size(cap(b)-len(b)) +} + +func totalSize(s []Size) uint64 { + var sum uint64 + for _, sz := range s { + sum += uint64(sz) + } + return sum +} + +const ( + maxInt32 = 0x7fffffff + maxInt = int(^uint(0) >> 1) + + isInt32Bit = maxInt == maxInt32 +) + +// maxSegmentSize returns the maximum permitted size of a single segment +// on this platform. +// +// This is effectively a compile-time constant, but can't be represented +// as a constant because it requires a conditional. It is trivially +// inlinable and optimizable, so should act like one. 
+func maxSegmentSize() Size { + if isInt32Bit { + return Size(maxInt32 - 7) + } else { + return maxSize - 7 + } +} + +var ( + errSegmentOutOfBounds = errors.New("capnp: segment ID out of bounds") + errSegment32Bit = errors.New("capnp: segment ID larger than 31 bits") + errMessageEmpty = errors.New("capnp: marshalling an empty message") + errHasData = errors.New("capnp: NewMessage called on arena with data") + errSegmentTooLarge = errors.New("capnp: segment too large") + errTooManySegments = errors.New("capnp: too many segments to decode") + errDecodeLimit = errors.New("capnp: message too large") +) diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/mem_18.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/mem_18.go new file mode 100644 index 00000000..d2853072 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/mem_18.go @@ -0,0 +1,10 @@ +// +build go1.8 + +package capnp + +import "net" + +func (e *Encoder) write(bufs [][]byte) error { + _, err := (*net.Buffers)(&bufs).WriteTo(e.w) + return err +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/mem_other.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/mem_other.go new file mode 100644 index 00000000..ba1ab667 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/mem_other.go @@ -0,0 +1,12 @@ +// +build !go1.8 + +package capnp + +func (e *Encoder) write(bufs [][]byte) error { + for _, b := range bufs { + if _, err := e.w.Write(b); err != nil { + return err + } + } + return nil +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/pointer.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/pointer.go new file mode 100644 index 00000000..9b69e6b3 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/pointer.go @@ -0,0 +1,304 @@ +package capnp + +// A Ptr is a reference to a Cap'n Proto struct, list, or interface. +// The zero value is a null pointer. +type Ptr struct { + seg *Segment + off Address + lenOrCap uint32 + size ObjectSize + depthLimit uint + flags ptrFlags +} + +func toPtr(p Pointer) Ptr { + if p == nil { + return Ptr{} + } + switch p := p.underlying().(type) { + case Struct: + return p.ToPtr() + case List: + return p.ToPtr() + case Interface: + return p.ToPtr() + } + return Ptr{} +} + +// Struct converts p to a Struct. If p does not hold a Struct pointer, +// the zero value is returned. +func (p Ptr) Struct() Struct { + if p.flags.ptrType() != structPtrType { + return Struct{} + } + return Struct{ + seg: p.seg, + off: p.off, + size: p.size, + flags: p.flags.structFlags(), + depthLimit: p.depthLimit, + } +} + +// StructDefault attempts to convert p into a struct, reading the +// default value from def if p is not a struct. +func (p Ptr) StructDefault(def []byte) (Struct, error) { + s := p.Struct() + if s.seg == nil { + if def == nil { + return Struct{}, nil + } + defp, err := unmarshalDefault(def) + if err != nil { + return Struct{}, err + } + return defp.Struct(), nil + } + return s, nil +} + +// List converts p to a List. If p does not hold a List pointer, +// the zero value is returned. 
+func (p Ptr) List() List { + if p.flags.ptrType() != listPtrType { + return List{} + } + return List{ + seg: p.seg, + off: p.off, + length: int32(p.lenOrCap), + size: p.size, + flags: p.flags.listFlags(), + depthLimit: p.depthLimit, + } +} + +// ListDefault attempts to convert p into a list, reading the default +// value from def if p is not a list. +func (p Ptr) ListDefault(def []byte) (List, error) { + l := p.List() + if l.seg == nil { + if def == nil { + return List{}, nil + } + defp, err := unmarshalDefault(def) + if err != nil { + return List{}, err + } + return defp.List(), nil + } + return l, nil +} + +// Interface converts p to an Interface. If p does not hold a List +// pointer, the zero value is returned. +func (p Ptr) Interface() Interface { + if p.flags.ptrType() != interfacePtrType { + return Interface{} + } + return Interface{ + seg: p.seg, + cap: CapabilityID(p.lenOrCap), + } +} + +// Text attempts to convert p into Text, returning an empty string if +// p is not a valid 1-byte list pointer. +func (p Ptr) Text() string { + b, ok := p.text() + if !ok { + return "" + } + return string(b) +} + +// TextDefault attempts to convert p into Text, returning def if p is +// not a valid 1-byte list pointer. +func (p Ptr) TextDefault(def string) string { + b, ok := p.text() + if !ok { + return def + } + return string(b) +} + +// TextBytes attempts to convert p into Text, returning nil if p is not +// a valid 1-byte list pointer. It returns a slice directly into the +// segment. +func (p Ptr) TextBytes() []byte { + b, ok := p.text() + if !ok { + return nil + } + return b +} + +// TextBytesDefault attempts to convert p into Text, returning def if p +// is not a valid 1-byte list pointer. It returns a slice directly into +// the segment. +func (p Ptr) TextBytesDefault(def string) []byte { + b, ok := p.text() + if !ok { + return []byte(def) + } + return b +} + +func (p Ptr) text() (b []byte, ok bool) { + if !isOneByteList(p) { + return nil, false + } + l := p.List() + b = l.seg.slice(l.off, Size(l.length)) + if len(b) == 0 || b[len(b)-1] != 0 { + // Text must be null-terminated. + return nil, false + } + return b[:len(b)-1 : len(b)], true +} + +// Data attempts to convert p into Data, returning nil if p is not a +// valid 1-byte list pointer. +func (p Ptr) Data() []byte { + return p.DataDefault(nil) +} + +// DataDefault attempts to convert p into Data, returning def if p is +// not a valid 1-byte list pointer. +func (p Ptr) DataDefault(def []byte) []byte { + if !isOneByteList(p) { + return def + } + l := p.List() + b := l.seg.slice(l.off, Size(l.length)) + if b == nil { + return def + } + return b +} + +func (p Ptr) toPointer() Pointer { + if p.seg == nil { + return nil + } + switch p.flags.ptrType() { + case structPtrType: + return p.Struct() + case listPtrType: + return p.List() + case interfacePtrType: + return p.Interface() + } + return nil +} + +// IsValid reports whether p is valid. +func (p Ptr) IsValid() bool { + return p.seg != nil +} + +// Segment returns the segment this pointer points into. +// If nil, then this is an invalid pointer. +func (p Ptr) Segment() *Segment { + return p.seg +} + +// Default returns p if it is valid, otherwise it unmarshals def. +func (p Ptr) Default(def []byte) (Ptr, error) { + if !p.IsValid() { + return unmarshalDefault(def) + } + return p, nil +} + +// SamePtr reports whether p and q refer to the same object. 
+func SamePtr(p, q Ptr) bool { + return p.seg == q.seg && p.off == q.off +} + +// A value that implements Pointer is a reference to a Cap'n Proto object. +// +// Deprecated: Using this type introduces an unnecessary allocation. +// Use Ptr instead. +type Pointer interface { + // Segment returns the segment this pointer points into. + // If nil, then this is an invalid pointer. + Segment() *Segment + + // HasData reports whether the object referenced by the pointer has + // non-zero size. + HasData() bool + + // underlying returns a Pointer that is one of a Struct, a List, or an + // Interface. + underlying() Pointer +} + +// IsValid reports whether p is valid. +// +// Deprecated: Use Ptr.IsValid instead. +func IsValid(p Pointer) bool { + return p != nil && p.Segment() != nil +} + +// HasData reports whether p has non-zero size. +// +// Deprecated: There are usually better ways to determine this +// information: length of a list, checking fields, or using HasFoo +// accessors. +func HasData(p Pointer) bool { + return IsValid(p) && p.HasData() +} + +// PointerDefault returns p if it is valid, otherwise it unmarshals def. +// +// Deprecated: Use Ptr.Default. +func PointerDefault(p Pointer, def []byte) (Pointer, error) { + pp, err := toPtr(p).Default(def) + return pp.toPointer(), err +} + +func unmarshalDefault(def []byte) (Ptr, error) { + msg, err := Unmarshal(def) + if err != nil { + return Ptr{}, err + } + p, err := msg.RootPtr() + if err != nil { + return Ptr{}, err + } + return p, nil +} + +type ptrFlags uint8 + +const interfacePtrFlag ptrFlags = interfacePtrType << 6 + +func structPtrFlag(f structFlags) ptrFlags { + return structPtrType<<6 | ptrFlags(f)&ptrLowerMask +} + +func listPtrFlag(f listFlags) ptrFlags { + return listPtrType<<6 | ptrFlags(f)&ptrLowerMask +} + +const ( + structPtrType = iota + listPtrType + interfacePtrType +) + +func (f ptrFlags) ptrType() int { + return int(f >> 6) +} + +const ptrLowerMask ptrFlags = 0x3f + +func (f ptrFlags) listFlags() listFlags { + return listFlags(f & ptrLowerMask) +} + +func (f ptrFlags) structFlags() structFlags { + return structFlags(f & ptrLowerMask) +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/rawpointer.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/rawpointer.go new file mode 100644 index 00000000..b72e2c3d --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/rawpointer.go @@ -0,0 +1,189 @@ +package capnp + +// pointerOffset is an address offset in multiples of word size. +type pointerOffset int32 + +// resolve returns an absolute address relative to a base address. +// For near pointers, the base is the end of the near pointer. +// For far pointers, the base is zero (the beginning of the segment). +func (off pointerOffset) resolve(base Address) (_ Address, ok bool) { + if off == 0 { + return base, true + } + addr := base + Address(off*pointerOffset(wordSize)) + return addr, (addr > base || off < 0) && (addr < base || off > 0) +} + +// nearPointerOffset computes the offset for a pointer at paddr to point to addr. +func nearPointerOffset(paddr, addr Address) pointerOffset { + return pointerOffset(addr/Address(wordSize) - paddr/Address(wordSize) - 1) +} + +// rawPointer is an encoded pointer. +type rawPointer uint64 + +// rawStructPointer returns a struct pointer. The offset is from the +// end of the pointer to the start of the struct. 
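+//
+// In the encoding used below, bits 0-1 hold the pointer type, bits 2-31 the
+// signed word offset, bits 32-47 the data section size in words, and bits
+// 48-63 the pointer count. A worked example (editor's addition):
+//
+//	rawStructPointer(2, ObjectSize{DataSize: 16, PointerCount: 1})
+//	// == 0x0001000200000008 (offset 2, 2 data words, 1 pointer)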
+func rawStructPointer(off pointerOffset, sz ObjectSize) rawPointer { + return rawPointer(structPointer) | rawPointer(uint32(off)<<2) | rawPointer(sz.dataWordCount())<<32 | rawPointer(sz.PointerCount)<<48 +} + +// rawListPointer returns a list pointer. The offset is the number of +// words relative to the end of the pointer that the list starts. If +// listType is compositeList, then length is the number of words +// that the list occupies, otherwise it is the number of elements in +// the list. +func rawListPointer(off pointerOffset, listType listType, length int32) rawPointer { + return rawPointer(listPointer) | rawPointer(uint32(off)<<2) | rawPointer(listType)<<32 | rawPointer(length)<<35 +} + +// rawInterfacePointer returns an interface pointer that references +// a capability number. +func rawInterfacePointer(capability CapabilityID) rawPointer { + return rawPointer(otherPointer) | rawPointer(capability)<<32 +} + +// rawFarPointer returns a pointer to a pointer in another segment. +func rawFarPointer(segID SegmentID, off Address) rawPointer { + return rawPointer(farPointer) | rawPointer(off&^7) | (rawPointer(segID) << 32) +} + +// rawDoubleFarPointer returns a pointer to a pointer in another segment. +func rawDoubleFarPointer(segID SegmentID, off Address) rawPointer { + return rawPointer(doubleFarPointer) | rawPointer(off&^7) | (rawPointer(segID) << 32) +} + +// landingPadNearPointer converts a double-far pointer landing pad into +// a near pointer in the destination segment. Its offset will be +// relative to the beginning of the segment. tag must be either a +// struct or a list pointer. +func landingPadNearPointer(far, tag rawPointer) rawPointer { + // Replace tag's offset with far's offset. + // far's offset (29-bit unsigned) just needs to be shifted down to + // make it into a signed 30-bit value. + return tag&^0xfffffffc | rawPointer(uint32(far&^3)>>1) +} + +type pointerType int + +// Raw pointer types. +const ( + structPointer pointerType = 0 + listPointer pointerType = 1 + farPointer pointerType = 2 + doubleFarPointer pointerType = 6 + otherPointer pointerType = 3 +) + +func (p rawPointer) pointerType() pointerType { + t := pointerType(p & 3) + if t == farPointer { + return pointerType(p & 7) + } + return t +} + +func (p rawPointer) structSize() ObjectSize { + c := uint16(p >> 32) + d := uint16(p >> 48) + return ObjectSize{ + DataSize: Size(c) * wordSize, + PointerCount: d, + } +} + +type listType int + +// Raw list pointer types. +const ( + voidList listType = 0 + bit1List listType = 1 + byte1List listType = 2 + byte2List listType = 3 + byte4List listType = 4 + byte8List listType = 5 + pointerList listType = 6 + compositeList listType = 7 +) + +func (p rawPointer) listType() listType { + return listType((p >> 32) & 7) +} + +func (p rawPointer) numListElements() int32 { + return int32(p >> 35) +} + +// elementSize returns the size of an individual element in the list referenced by p. +func (p rawPointer) elementSize() ObjectSize { + switch p.listType() { + case voidList: + return ObjectSize{} + case bit1List: + // Size is ignored on bit lists. 
+ return ObjectSize{} + case byte1List: + return ObjectSize{DataSize: 1} + case byte2List: + return ObjectSize{DataSize: 2} + case byte4List: + return ObjectSize{DataSize: 4} + case byte8List: + return ObjectSize{DataSize: 8} + case pointerList: + return ObjectSize{PointerCount: 1} + default: + panic("elementSize not supposed to be called on composite or unknown list type") + } +} + +// totalListSize returns the total size of the list referenced by p. +func (p rawPointer) totalListSize() (sz Size, ok bool) { + n := p.numListElements() + switch p.listType() { + case voidList: + return 0, true + case bit1List: + return Size((n + 7) / 8), true + case compositeList: + // For a composite list, n represents the number of words (excluding the tag word). + return wordSize.times(n + 1) + default: + return p.elementSize().totalSize().times(n) + } +} + +// offset returns a pointer's offset. Only valid for struct or list +// pointers. +func (p rawPointer) offset() pointerOffset { + return pointerOffset(int32(p) >> 2) +} + +// withOffset replaces a pointer's offset. Only valid for struct or +// list pointers. +func (p rawPointer) withOffset(off pointerOffset) rawPointer { + return p&^0xfffffffc | rawPointer(uint32(off<<2)) +} + +// farAddress returns the address of the landing pad pointer. +func (p rawPointer) farAddress() Address { + // Far pointer offset is 29 bits, starting after the low 3 bits. + // It's an unsigned word offset, which would be equivalent to a + // logical left shift by 3. + return Address(p) &^ 7 +} + +// farSegment returns the segment ID that the far pointer references. +func (p rawPointer) farSegment() SegmentID { + return SegmentID(p >> 32) +} + +// otherPointerType returns the type of "other pointer" from p. +func (p rawPointer) otherPointerType() uint32 { + return uint32(p) >> 2 +} + +// capabilityIndex returns the index of the capability in the message's capability table. +func (p rawPointer) capabilityIndex() CapabilityID { + return CapabilityID(p >> 32) +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/readlimit.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/readlimit.go new file mode 100644 index 00000000..1e1f9808 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/readlimit.go @@ -0,0 +1,38 @@ +package capnp + +import "sync/atomic" + +// A ReadLimiter tracks the number of bytes read from a message in order +// to avoid amplification attacks as detailed in +// https://capnproto.org/encoding.html#amplification-attack. +// It is safe to use from multiple goroutines. +type ReadLimiter struct { + limit uint64 +} + +// canRead reports whether the amount of bytes can be stored safely. +func (rl *ReadLimiter) canRead(sz Size) bool { + for { + curr := atomic.LoadUint64(&rl.limit) + ok := curr >= uint64(sz) + var new uint64 + if ok { + new = curr - uint64(sz) + } else { + new = 0 + } + if atomic.CompareAndSwapUint64(&rl.limit, curr, new) { + return ok + } + } +} + +// Reset sets the number of bytes allowed to be read. +func (rl *ReadLimiter) Reset(limit uint64) { + atomic.StoreUint64(&rl.limit, limit) +} + +// Unread increases the limit by sz. 
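+//
+// Unread is the counterpart of canRead: a caller that accounted for a read
+// it did not actually perform can return the bytes to the budget. Editor's
+// sketch of the internal flow:
+//
+//	rl.Reset(1 << 20)   // allow 1 MiB of traversal
+//	ok := rl.canRead(8) // consumes 8 bytes of the budget when it fits
+//	if ok {
+//		rl.Unread(8) // give the 8 bytes back
+//	}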
+func (rl *ReadLimiter) Unread(sz Size) { + atomic.AddUint64(&rl.limit, uint64(sz)) +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/regen.sh b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/regen.sh new file mode 100644 index 00000000..a9c6cb14 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/regen.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# regen.sh - update capnpc-go and regenerate schemas +set -euo pipefail + +cd "$(dirname "$0")" + +echo "** mktemplates" +(cd internal/cmd/mktemplates && go build -tags=mktemplates) + +echo "** capnpc-go" +# Run tests so that we don't install a broken capnpc-go. +(cd capnpc-go && go generate && go test && go install) + +echo "** schemas" +(cd std/capnp; ./gen.sh compile) +capnp compile -ogo std/go.capnp && mv std/go.capnp.go ./ +go generate ./... diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/schemas/BUILD.bazel b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/schemas/BUILD.bazel new file mode 100644 index 00000000..b387aeaa --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/schemas/BUILD.bazel @@ -0,0 +1,18 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["schemas.go"], + visibility = ["//visibility:public"], + deps = ["//internal/packed:go_default_library"], +) + +go_test( + name = "go_default_xtest", + srcs = ["schemas_test.go"], + deps = [ + ":go_default_library", + "//:go_default_library", + "//internal/schema:go_default_library", + ], +) diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/schemas/schemas.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/schemas/schemas.go new file mode 100644 index 00000000..8da117e4 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/schemas/schemas.go @@ -0,0 +1,185 @@ +// Package schemas provides a container for Cap'n Proto reflection data. +// The code generated by capnpc-go will register its schema in the +// default registry (unless disabled at generation time). +// +// Most programs will use the default registry. However, a program +// could dynamically build up a registry, perhaps by invoking the capnp +// tool or querying a service. +package schemas + +import ( + "bufio" + "bytes" + "compress/zlib" + "errors" + "fmt" + "io" + "io/ioutil" + "strings" + "sync" + + "zombiezen.com/go/capnproto2/internal/packed" +) + +// A Schema is a collection of schema nodes parsed by the capnp tool. +type Schema struct { + // Either String or Bytes must be populated with a CodeGeneratorRequest + // message in the standard Cap'n Proto framing format. + String string + Bytes []byte + + // If true, the input is assumed to be zlib-compressed and packed. + Compressed bool + + // Node IDs that are contained in this schema. + Nodes []uint64 +} + +// A Registry is a mapping of IDs to schema blobs. It is safe to read +// from multiple goroutines. The zero value is an empty registry. +type Registry struct { + m map[uint64]*record +} + +// Register indexes a schema in the registry. It is an error to +// register schemas with overlapping IDs. 
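+//
+// A minimal sketch (editor's addition; codeGenRequestBytes and the node ID
+// are hypothetical placeholders):
+//
+//	var reg Registry
+//	err := reg.Register(&Schema{
+//		Bytes: codeGenRequestBytes, // uncompressed, unpacked framing
+//		Nodes: []uint64{0xdeadbeef12345678},
+//	})
+//	if err != nil {
+//		return err
+//	}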
+func (reg *Registry) Register(s *Schema) error { + if len(s.String) > 0 && len(s.Bytes) > 0 { + return errors.New("schemas: schema should have only one of string or bytes") + } + r := &record{ + s: s.String, + data: s.Bytes, + compressed: s.Compressed, + } + if reg.m == nil { + reg.m = make(map[uint64]*record) + } + for _, id := range s.Nodes { + if _, dup := reg.m[id]; dup { + return &dupeError{id: id} + } + reg.m[id] = r + } + return nil +} + +// Find returns the CodeGeneratorRequest message for the given ID, +// suitable for capnp.Unmarshal. If the ID is not found, Find returns +// an error that can be identified with IsNotFound. The returned byte +// slice should not be modified. +func (reg *Registry) Find(id uint64) ([]byte, error) { + r := reg.m[id] + if r == nil { + return nil, ¬FoundError{id: id} + } + b, err := r.read() + if err != nil { + return nil, &decompressError{id, err} + } + return b, nil +} + +type record struct { + // All the fields are protected by once. + once sync.Once + s string // input + compressed bool + data []byte // input and result + err error // result +} + +func (r *record) read() ([]byte, error) { + r.once.Do(func() { + if !r.compressed { + if r.s != "" { + r.data = []byte(r.s) + r.s = "" + } + return + } + var in io.Reader + if r.s != "" { + in = strings.NewReader(r.s) + r.s = "" + } else { + in = bytes.NewReader(r.data) + } + z, err := zlib.NewReader(in) + if err != nil { + r.data, r.err = nil, err + return + } + p := packed.NewReader(bufio.NewReader(z)) + r.data, r.err = ioutil.ReadAll(p) + if err != nil { + r.data = nil + return + } + }) + return r.data, r.err +} + +// DefaultRegistry is the process-wide registry used by Register and Find. +var DefaultRegistry Registry + +// Register is called by generated code to associate a blob of zlib- +// compressed, packed Cap'n Proto data for a CodeGeneratorRequest with +// the IDs it contains. It should only be called during init(). +func Register(data string, ids ...uint64) { + err := DefaultRegistry.Register(&Schema{ + String: data, + Nodes: ids, + Compressed: true, + }) + if err != nil { + panic(err) + } +} + +// Find returns the CodeGeneratorRequest message for the given ID, +// suitable for capnp.Unmarshal, or nil if the ID was not found. +// It is safe to call Find from multiple goroutines, so the returned +// byte slice should not be modified. However, it is not safe to +// call Find concurrently with Register. +func Find(id uint64) []byte { + b, err := DefaultRegistry.Find(id) + if IsNotFound(err) { + return nil + } + if err != nil { + panic(err) + } + return b +} + +// IsNotFound reports whether e indicates a failure to find a schema. 
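+//
+// For example (editor's sketch), distinguishing a missing schema from one
+// that failed to decompress:
+//
+//	b, err := DefaultRegistry.Find(id)
+//	switch {
+//	case IsNotFound(err):
+//		// no schema registered for id
+//	case err != nil:
+//		// registered, but reading or decompressing it failed
+//	default:
+//		_ = b // pass b to capnp.Unmarshal
+//	}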
+func IsNotFound(e error) bool { + _, ok := e.(*notFoundError) + return ok +} + +type dupeError struct { + id uint64 +} + +func (e *dupeError) Error() string { + return fmt.Sprintf("schemas: registered @%#x twice", e.id) +} + +type notFoundError struct { + id uint64 +} + +func (e *notFoundError) Error() string { + return fmt.Sprintf("schemas: could not find @%#x", e.id) +} + +type decompressError struct { + id uint64 + err error +} + +func (e *decompressError) Error() string { + return fmt.Sprintf("schemas: decompressing schema for @%#x: %v", e.id, e.err) +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/strings.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/strings.go new file mode 100644 index 00000000..a3f45b4c --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/strings.go @@ -0,0 +1,125 @@ +// +build !nocapnpstrings + +package capnp + +import ( + "fmt" +) + +// String returns the address in hex format. +func (addr Address) String() string { + return fmt.Sprintf("%#08x", uint64(addr)) +} + +// GoString returns the address in hex format. +func (addr Address) GoString() string { + return fmt.Sprintf("capnp.Address(%#08x)", uint64(addr)) +} + +// String returns the size in the format "X bytes". +func (sz Size) String() string { + if sz == 1 { + return "1 byte" + } + return fmt.Sprintf("%d bytes", sz) +} + +// GoString returns the size as a Go expression. +func (sz Size) GoString() string { + return fmt.Sprintf("capnp.Size(%d)", sz) +} + +// String returns the offset in the format "+X bytes". +func (off DataOffset) String() string { + if off == 1 { + return "+1 byte" + } + return fmt.Sprintf("+%d bytes", off) +} + +// GoString returns the offset as a Go expression. +func (off DataOffset) GoString() string { + return fmt.Sprintf("capnp.DataOffset(%d)", off) +} + +// String returns a short, human readable representation of the object +// size. +func (sz ObjectSize) String() string { + return fmt.Sprintf("{datasz=%d ptrs=%d}", sz.DataSize, sz.PointerCount) +} + +// GoString formats the ObjectSize as a keyed struct literal. +func (sz ObjectSize) GoString() string { + return fmt.Sprintf("capnp.ObjectSize{DataSize: %d, PointerCount: %d}", sz.DataSize, sz.PointerCount) +} + +// String returns the offset in the format "bit X". +func (bit BitOffset) String() string { + return fmt.Sprintf("bit %d", bit) +} + +// GoString returns the offset as a Go expression. +func (bit BitOffset) GoString() string { + return fmt.Sprintf("capnp.BitOffset(%d)", bit) +} + +// String returns the ID in the format "capability X". +func (id CapabilityID) String() string { + return fmt.Sprintf("capability %d", id) +} + +// GoString returns the ID as a Go expression. +func (id CapabilityID) GoString() string { + return fmt.Sprintf("capnp.CapabilityID(%d)", id) +} + +// GoString formats the pointer as a call to one of the rawPointer +// construction functions. 
+func (p rawPointer) GoString() string { + if p == 0 { + return "rawPointer(0)" + } + switch p.pointerType() { + case structPointer: + return fmt.Sprintf("rawStructPointer(%d, %#v)", p.offset(), p.structSize()) + case listPointer: + var lt string + switch p.listType() { + case voidList: + lt = "voidList" + case bit1List: + lt = "bit1List" + case byte1List: + lt = "byte1List" + case byte2List: + lt = "byte2List" + case byte4List: + lt = "byte4List" + case byte8List: + lt = "byte8List" + case pointerList: + lt = "pointerList" + case compositeList: + lt = "compositeList" + } + return fmt.Sprintf("rawListPointer(%d, %s, %d)", p.offset(), lt, p.numListElements()) + case farPointer: + return fmt.Sprintf("rawFarPointer(%d, %v)", p.farSegment(), p.farAddress()) + case doubleFarPointer: + return fmt.Sprintf("rawDoubleFarPointer(%d, %v)", p.farSegment(), p.farAddress()) + default: + // other pointer + if p.otherPointerType() != 0 { + return fmt.Sprintf("rawPointer(%#016x)", uint64(p)) + } + return fmt.Sprintf("rawInterfacePointer(%d)", p.capabilityIndex()) + } +} + +func (ssa *singleSegmentArena) String() string { + return fmt.Sprintf("single-segment arena [len=%d cap=%d]", len(*ssa), cap(*ssa)) +} + +func (msa *multiSegmentArena) String() string { + return fmt.Sprintf("multi-segment arena [%d segments]", len(*msa)) +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/struct.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/struct.go new file mode 100644 index 00000000..92148253 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/struct.go @@ -0,0 +1,368 @@ +package capnp + +// Struct is a pointer to a struct. +type Struct struct { + seg *Segment + off Address + size ObjectSize + depthLimit uint + flags structFlags +} + +// NewStruct creates a new struct, preferring placement in s. +func NewStruct(s *Segment, sz ObjectSize) (Struct, error) { + if !sz.isValid() { + return Struct{}, errObjectSize + } + sz.DataSize = sz.DataSize.padToWord() + seg, addr, err := alloc(s, sz.totalSize()) + if err != nil { + return Struct{}, err + } + return Struct{ + seg: seg, + off: addr, + size: sz, + depthLimit: maxDepth, + }, nil +} + +// NewRootStruct creates a new struct, preferring placement in s, then sets the +// message's root to the new struct. +func NewRootStruct(s *Segment, sz ObjectSize) (Struct, error) { + st, err := NewStruct(s, sz) + if err != nil { + return st, err + } + if err := s.msg.SetRootPtr(st.ToPtr()); err != nil { + return st, err + } + return st, nil +} + +// ToStruct converts p to a Struct. +// +// Deprecated: Use Ptr.Struct. +func ToStruct(p Pointer) Struct { + if !IsValid(p) { + return Struct{} + } + s, ok := p.underlying().(Struct) + if !ok { + return Struct{} + } + return s +} + +// ToStructDefault attempts to convert p into a struct, reading the +// default value from def if p is not a struct. +// +// Deprecated: Use Ptr.StructDefault. +func ToStructDefault(p Pointer, def []byte) (Struct, error) { + return toPtr(p).StructDefault(def) +} + +// ToPtr converts the struct to a generic pointer. +func (p Struct) ToPtr() Ptr { + return Ptr{ + seg: p.seg, + off: p.off, + size: p.size, + depthLimit: p.depthLimit, + flags: structPtrFlag(p.flags), + } +} + +// Segment returns the segment this pointer came from. +func (p Struct) Segment() *Segment { + return p.seg +} + +// IsValid returns whether the struct is valid. 
+func (p Struct) IsValid() bool { + return p.seg != nil +} + +// Address returns the address the pointer references. +// +// Deprecated: The return value is not well-defined. Use SamePtr if you +// need to check whether two pointers refer to the same object. +func (p Struct) Address() Address { + return p.off +} + +// Size returns the size of the struct. +func (p Struct) Size() ObjectSize { + return p.size +} + +// HasData reports whether the struct has a non-zero size. +func (p Struct) HasData() bool { + return !p.size.isZero() +} + +// readSize returns the struct's size for the purposes of read limit +// accounting. +func (p Struct) readSize() Size { + if p.seg == nil { + return 0 + } + return p.size.totalSize() +} + +func (p Struct) underlying() Pointer { + return p +} + +// Pointer returns the i'th pointer in the struct. +// +// Deprecated: Use Ptr. +func (p Struct) Pointer(i uint16) (Pointer, error) { + pp, err := p.Ptr(i) + return pp.toPointer(), err +} + +// Ptr returns the i'th pointer in the struct. +func (p Struct) Ptr(i uint16) (Ptr, error) { + if p.seg == nil || i >= p.size.PointerCount { + return Ptr{}, nil + } + return p.seg.readPtr(p.pointerAddress(i), p.depthLimit) +} + +// SetPointer sets the i'th pointer in the struct to src. +// +// Deprecated: Use SetPtr. +func (p Struct) SetPointer(i uint16, src Pointer) error { + return p.SetPtr(i, toPtr(src)) +} + +// SetPtr sets the i'th pointer in the struct to src. +func (p Struct) SetPtr(i uint16, src Ptr) error { + if p.seg == nil || i >= p.size.PointerCount { + panic(errOutOfBounds) + } + return p.seg.writePtr(p.pointerAddress(i), src, false) +} + +// SetText sets the i'th pointer to a newly allocated text or null if v is empty. +func (p Struct) SetText(i uint16, v string) error { + if v == "" { + return p.SetPtr(i, Ptr{}) + } + return p.SetNewText(i, v) +} + +// SetNewText sets the i'th pointer to a newly allocated text. +func (p Struct) SetNewText(i uint16, v string) error { + t, err := NewText(p.seg, v) + if err != nil { + return err + } + return p.SetPtr(i, t.List.ToPtr()) +} + +// SetTextFromBytes sets the i'th pointer to a newly allocated text or null if v is nil. +func (p Struct) SetTextFromBytes(i uint16, v []byte) error { + if v == nil { + return p.SetPtr(i, Ptr{}) + } + t, err := NewTextFromBytes(p.seg, v) + if err != nil { + return err + } + return p.SetPtr(i, t.List.ToPtr()) +} + +// SetData sets the i'th pointer to a newly allocated data or null if v is nil. +func (p Struct) SetData(i uint16, v []byte) error { + if v == nil { + return p.SetPtr(i, Ptr{}) + } + d, err := NewData(p.seg, v) + if err != nil { + return err + } + return p.SetPtr(i, d.List.ToPtr()) +} + +func (p Struct) pointerAddress(i uint16) Address { + // Struct already had bounds check + ptrStart, _ := p.off.addSize(p.size.DataSize) + a, _ := ptrStart.element(int32(i), wordSize) + return a +} + +// bitInData reports whether bit is inside p's data section. +func (p Struct) bitInData(bit BitOffset) bool { + return p.seg != nil && bit < BitOffset(p.size.DataSize*8) +} + +// Bit returns the bit that is n bits from the start of the struct. +func (p Struct) Bit(n BitOffset) bool { + if !p.bitInData(n) { + return false + } + addr := p.off.addOffset(n.offset()) + return p.seg.readUint8(addr)&n.mask() != 0 +} + +// SetBit sets the bit that is n bits from the start of the struct to v. 
+func (p Struct) SetBit(n BitOffset, v bool) { + if !p.bitInData(n) { + panic(errOutOfBounds) + } + addr := p.off.addOffset(n.offset()) + b := p.seg.readUint8(addr) + if v { + b |= n.mask() + } else { + b &^= n.mask() + } + p.seg.writeUint8(addr, b) +} + +func (p Struct) dataAddress(off DataOffset, sz Size) (addr Address, ok bool) { + if p.seg == nil || Size(off)+sz > p.size.DataSize { + return 0, false + } + return p.off.addOffset(off), true +} + +// Uint8 returns an 8-bit integer from the struct's data section. +func (p Struct) Uint8(off DataOffset) uint8 { + addr, ok := p.dataAddress(off, 1) + if !ok { + return 0 + } + return p.seg.readUint8(addr) +} + +// Uint16 returns a 16-bit integer from the struct's data section. +func (p Struct) Uint16(off DataOffset) uint16 { + addr, ok := p.dataAddress(off, 2) + if !ok { + return 0 + } + return p.seg.readUint16(addr) +} + +// Uint32 returns a 32-bit integer from the struct's data section. +func (p Struct) Uint32(off DataOffset) uint32 { + addr, ok := p.dataAddress(off, 4) + if !ok { + return 0 + } + return p.seg.readUint32(addr) +} + +// Uint64 returns a 64-bit integer from the struct's data section. +func (p Struct) Uint64(off DataOffset) uint64 { + addr, ok := p.dataAddress(off, 8) + if !ok { + return 0 + } + return p.seg.readUint64(addr) +} + +// SetUint8 sets the 8-bit integer that is off bytes from the start of the struct to v. +func (p Struct) SetUint8(off DataOffset, v uint8) { + addr, ok := p.dataAddress(off, 1) + if !ok { + panic(errOutOfBounds) + } + p.seg.writeUint8(addr, v) +} + +// SetUint16 sets the 16-bit integer that is off bytes from the start of the struct to v. +func (p Struct) SetUint16(off DataOffset, v uint16) { + addr, ok := p.dataAddress(off, 2) + if !ok { + panic(errOutOfBounds) + } + p.seg.writeUint16(addr, v) +} + +// SetUint32 sets the 32-bit integer that is off bytes from the start of the struct to v. +func (p Struct) SetUint32(off DataOffset, v uint32) { + addr, ok := p.dataAddress(off, 4) + if !ok { + panic(errOutOfBounds) + } + p.seg.writeUint32(addr, v) +} + +// SetUint64 sets the 64-bit integer that is off bytes from the start of the struct to v. +func (p Struct) SetUint64(off DataOffset, v uint64) { + addr, ok := p.dataAddress(off, 8) + if !ok { + panic(errOutOfBounds) + } + p.seg.writeUint64(addr, v) +} + +// structFlags is a bitmask of flags for a pointer. +type structFlags uint8 + +// Pointer flags. +const ( + isListMember structFlags = 1 << iota +) + +// copyStruct makes a deep copy of src into dst. +func copyStruct(dst, src Struct) error { + if dst.seg == nil { + return nil + } + + // Q: how does version handling happen here, when the + // destination toData[] slice can be bigger or smaller + // than the source data slice, which is in + // src.seg.Data[src.off:src.off+src.size.DataSize] ? + // + // A: Newer fields only come *after* old fields. Note that + // copy only copies min(len(src), len(dst)) size, + // and then we manually zero the rest in the for loop + // that writes toData[j] = 0. + // + + // data section: + srcData := src.seg.slice(src.off, src.size.DataSize) + dstData := dst.seg.slice(dst.off, dst.size.DataSize) + copyCount := copy(dstData, srcData) + dstData = dstData[copyCount:] + for j := range dstData { + dstData[j] = 0 + } + + // ptrs section: + + // version handling: we ignore any extra-newer-pointers in src, + // i.e. the case when srcPtrSize > dstPtrSize, by only + // running j over the size of dstPtrSize, the destination size. 
+ srcPtrSect, _ := src.off.addSize(src.size.DataSize) + dstPtrSect, _ := dst.off.addSize(dst.size.DataSize) + numSrcPtrs := src.size.PointerCount + numDstPtrs := dst.size.PointerCount + for j := uint16(0); j < numSrcPtrs && j < numDstPtrs; j++ { + srcAddr, _ := srcPtrSect.element(int32(j), wordSize) + dstAddr, _ := dstPtrSect.element(int32(j), wordSize) + m, err := src.seg.readPtr(srcAddr, src.depthLimit) + if err != nil { + return err + } + err = dst.seg.writePtr(dstAddr, m, true) + if err != nil { + return err + } + } + for j := numSrcPtrs; j < numDstPtrs; j++ { + // destination p is a newer version than source so these extra new pointer fields in p must be zeroed. + addr, _ := dstPtrSect.element(int32(j), wordSize) + dst.seg.writeRawPointer(addr, 0) + } + // Nothing more here: so any other pointers in srcPtrSize beyond + // those in dstPtrSize are ignored and discarded. + + return nil +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/container.go b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/container.go new file mode 100644 index 00000000..1b8315ef --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/container.go @@ -0,0 +1,143 @@ +/* +Copyright 2018 The v3io Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v3io + +// A container interface allows perform actions against a container +type Container interface { + // + // Container + // + + // GetContainers + GetClusterMD(*GetClusterMDInput, interface{}, chan *Response) (*Request, error) + + // GetContainersSync + GetClusterMDSync(*GetClusterMDInput) (*Response, error) + + // GetContainers + GetContainers(*GetContainersInput, interface{}, chan *Response) (*Request, error) + + // GetContainersSync + GetContainersSync(*GetContainersInput) (*Response, error) + + // GetContainers + GetContainerContents(*GetContainerContentsInput, interface{}, chan *Response) (*Request, error) + + // GetContainerContentsSync + GetContainerContentsSync(*GetContainerContentsInput) (*Response, error) + + // + // Object + // + // CheckPathExists + CheckPathExists(*CheckPathExistsInput, interface{}, chan *Response) (*Request, error) + + // CheckPathExistsSync + CheckPathExistsSync(*CheckPathExistsInput) error + + // GetObject + GetObject(*GetObjectInput, interface{}, chan *Response) (*Request, error) + + // GetObjectSync + GetObjectSync(*GetObjectInput) (*Response, error) + + // PutObject + PutObject(*PutObjectInput, interface{}, chan *Response) (*Request, error) + + // PutObjectSync + PutObjectSync(*PutObjectInput) error + + // DeleteObject + DeleteObject(*DeleteObjectInput, interface{}, chan *Response) (*Request, error) + + // DeleteObjectSync + DeleteObjectSync(*DeleteObjectInput) error + + // + // KV + // + + // GetItem + GetItem(*GetItemInput, interface{}, chan *Response) (*Request, error) + + // GetItemSync + GetItemSync(*GetItemInput) (*Response, error) + + // GetItems + GetItems(*GetItemsInput, interface{}, chan *Response) (*Request, error) + + // GetItemSync + GetItemsSync(*GetItemsInput) (*Response, error) + + // PutItem + PutItem(*PutItemInput, interface{}, chan *Response) (*Request, error) + + // PutItemSync + PutItemSync(*PutItemInput) (*Response, error) + + // PutItems + PutItems(*PutItemsInput, interface{}, chan *Response) (*Request, error) + + // PutItemsSync + PutItemsSync(*PutItemsInput) (*Response, error) + + // UpdateItem + UpdateItem(*UpdateItemInput, interface{}, chan *Response) (*Request, error) + + // UpdateItemSync + UpdateItemSync(*UpdateItemInput) (*Response, error) + + // + // Stream + // + + // CreateStream + CreateStream(*CreateStreamInput, interface{}, chan *Response) (*Request, error) + + // CreateStreamSync + CreateStreamSync(*CreateStreamInput) error + + // DescribeStream + DescribeStream(*DescribeStreamInput, interface{}, chan *Response) (*Request, error) + + // DescribeStreamSync + DescribeStreamSync(*DescribeStreamInput) (*Response, error) + + // DeleteStream + DeleteStream(*DeleteStreamInput, interface{}, chan *Response) (*Request, error) + + // DeleteStreamSync + DeleteStreamSync(*DeleteStreamInput) error + + // SeekShard + SeekShard(*SeekShardInput, interface{}, chan *Response) (*Request, error) + + // SeekShardSync + SeekShardSync(*SeekShardInput) (*Response, error) + + // PutRecords + PutRecords(*PutRecordsInput, interface{}, chan *Response) (*Request, error) + + // PutRecordsSync + PutRecordsSync(*PutRecordsInput) (*Response, error) + + // GetRecords + GetRecords(*GetRecordsInput, interface{}, chan *Response) (*Request, error) + + // GetRecordsSync + GetRecordsSync(*GetRecordsInput) (*Response, error) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/context.go b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/context.go new file mode 100644 index 00000000..5a9455df --- 
/dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/context.go @@ -0,0 +1,24 @@ +/* +Copyright 2018 The v3io Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v3io + +type Context interface { + Container + + // create a new session + NewSession(*NewSessionInput) (Session, error) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/container.go b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/container.go new file mode 100644 index 00000000..da642cea --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/container.go @@ -0,0 +1,265 @@ +package v3iohttp + +import ( + "github.com/v3io/v3io-go/pkg/dataplane" + + "github.com/nuclio/logger" +) + +type container struct { + logger logger.Logger + session *session + containerName string +} + +func newContainer(parentLogger logger.Logger, + session *session, + containerName string) (v3io.Container, error) { + + return &container{ + logger: parentLogger.GetChild("container"), + session: session, + containerName: containerName, + }, nil +} + +func (c *container) populateInputFields(input *v3io.DataPlaneInput) { + input.ContainerName = c.containerName + input.URL = c.session.url + input.AuthenticationToken = c.session.authenticationToken + input.AccessKey = c.session.accessKey +} + +// GetItem +func (c *container) GetItem(getItemInput *v3io.GetItemInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&getItemInput.DataPlaneInput) + return c.session.context.GetItem(getItemInput, context, responseChan) +} + +// GetItemSync +func (c *container) GetItemSync(getItemInput *v3io.GetItemInput) (*v3io.Response, error) { + c.populateInputFields(&getItemInput.DataPlaneInput) + return c.session.context.GetItemSync(getItemInput) +} + +// GetItems +func (c *container) GetItems(getItemsInput *v3io.GetItemsInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&getItemsInput.DataPlaneInput) + return c.session.context.GetItems(getItemsInput, context, responseChan) +} + +// GetItemSync +func (c *container) GetItemsSync(getItemsInput *v3io.GetItemsInput) (*v3io.Response, error) { + c.populateInputFields(&getItemsInput.DataPlaneInput) + return c.session.context.GetItemsSync(getItemsInput) +} + +// PutItem +func (c *container) PutItem(putItemInput *v3io.PutItemInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&putItemInput.DataPlaneInput) + return c.session.context.PutItem(putItemInput, context, responseChan) +} + +// PutItemSync +func (c *container) PutItemSync(putItemInput *v3io.PutItemInput) (*v3io.Response, error) { + c.populateInputFields(&putItemInput.DataPlaneInput) + return c.session.context.PutItemSync(putItemInput) +} + +// PutItems +func (c *container) PutItems(putItemsInput *v3io.PutItemsInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + 
c.populateInputFields(&putItemsInput.DataPlaneInput) + return c.session.context.PutItems(putItemsInput, context, responseChan) +} + +// PutItemsSync +func (c *container) PutItemsSync(putItemsInput *v3io.PutItemsInput) (*v3io.Response, error) { + c.populateInputFields(&putItemsInput.DataPlaneInput) + return c.session.context.PutItemsSync(putItemsInput) +} + +// UpdateItem +func (c *container) UpdateItem(updateItemInput *v3io.UpdateItemInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&updateItemInput.DataPlaneInput) + return c.session.context.UpdateItem(updateItemInput, context, responseChan) +} + +// UpdateItemSync +func (c *container) UpdateItemSync(updateItemInput *v3io.UpdateItemInput) (*v3io.Response, error) { + c.populateInputFields(&updateItemInput.DataPlaneInput) + return c.session.context.UpdateItemSync(updateItemInput) +} + +// GetObject +func (c *container) GetObject(getObjectInput *v3io.GetObjectInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&getObjectInput.DataPlaneInput) + return c.session.context.GetObject(getObjectInput, context, responseChan) +} + +// GetObjectSync +func (c *container) GetObjectSync(getObjectInput *v3io.GetObjectInput) (*v3io.Response, error) { + c.populateInputFields(&getObjectInput.DataPlaneInput) + return c.session.context.GetObjectSync(getObjectInput) +} + +// PutObject +func (c *container) PutObject(putObjectInput *v3io.PutObjectInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&putObjectInput.DataPlaneInput) + return c.session.context.PutObject(putObjectInput, context, responseChan) +} + +// PutObjectSync +func (c *container) PutObjectSync(putObjectInput *v3io.PutObjectInput) error { + c.populateInputFields(&putObjectInput.DataPlaneInput) + return c.session.context.PutObjectSync(putObjectInput) +} + +// DeleteObject +func (c *container) DeleteObject(deleteObjectInput *v3io.DeleteObjectInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&deleteObjectInput.DataPlaneInput) + return c.session.context.DeleteObject(deleteObjectInput, context, responseChan) +} + +// DeleteObjectSync +func (c *container) DeleteObjectSync(deleteObjectInput *v3io.DeleteObjectInput) error { + c.populateInputFields(&deleteObjectInput.DataPlaneInput) + return c.session.context.DeleteObjectSync(deleteObjectInput) +} + +// GetContainers +func (c *container) GetContainers(getContainersInput *v3io.GetContainersInput, context interface{}, responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&getContainersInput.DataPlaneInput) + return c.session.context.GetContainers(getContainersInput, context, responseChan) +} + +// GetContainersSync +func (c *container) GetContainersSync(getContainersInput *v3io.GetContainersInput) (*v3io.Response, error) { + c.populateInputFields(&getContainersInput.DataPlaneInput) + return c.session.context.GetContainersSync(getContainersInput) +} + +// GetClusterMD +func (c *container) GetClusterMD(getClusterMDInput *v3io.GetClusterMDInput, context interface{}, responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&getClusterMDInput.DataPlaneInput) + return c.session.context.GetClusterMD(getClusterMDInput, context, responseChan) +} + +// GetClusterMDSync +func (c *container) GetClusterMDSync(getClusterMDInput *v3io.GetClusterMDInput) (*v3io.Response, 
error) { + c.populateInputFields(&getClusterMDInput.DataPlaneInput) + return c.session.context.GetClusterMDSync(getClusterMDInput) +} + +// GetContainers +func (c *container) GetContainerContents(getContainerContentsInput *v3io.GetContainerContentsInput, context interface{}, responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&getContainerContentsInput.DataPlaneInput) + return c.session.context.GetContainerContents(getContainerContentsInput, context, responseChan) +} + +// GetContainerContentsSync +func (c *container) GetContainerContentsSync(getContainerContentsInput *v3io.GetContainerContentsInput) (*v3io.Response, error) { + c.populateInputFields(&getContainerContentsInput.DataPlaneInput) + return c.session.context.GetContainerContentsSync(getContainerContentsInput) +} + +// CreateStream +func (c *container) CreateStream(createStreamInput *v3io.CreateStreamInput, context interface{}, responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&createStreamInput.DataPlaneInput) + return c.session.context.CreateStream(createStreamInput, context, responseChan) +} + +// CreateStreamSync +func (c *container) CreateStreamSync(createStreamInput *v3io.CreateStreamInput) error { + c.populateInputFields(&createStreamInput.DataPlaneInput) + return c.session.context.CreateStreamSync(createStreamInput) +} + +// DescribeStream +func (c *container) DescribeStream(describeStreamInput *v3io.DescribeStreamInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&describeStreamInput.DataPlaneInput) + return c.session.context.DescribeStream(describeStreamInput, context, responseChan) +} + +// DescribeStreamSync +func (c *container) DescribeStreamSync(describeStreamInput *v3io.DescribeStreamInput) (*v3io.Response, error) { + c.populateInputFields(&describeStreamInput.DataPlaneInput) + return c.session.context.DescribeStreamSync(describeStreamInput) +} + +// CheckPathExists +func (c *container) CheckPathExists(checkPathExistsInput *v3io.CheckPathExistsInput, context interface{}, responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&checkPathExistsInput.DataPlaneInput) + return c.session.context.CheckPathExists(checkPathExistsInput, context, responseChan) +} + +// CheckPathExistsSync +func (c *container) CheckPathExistsSync(checkPathExistsInput *v3io.CheckPathExistsInput) error { + c.populateInputFields(&checkPathExistsInput.DataPlaneInput) + return c.session.context.CheckPathExistsSync(checkPathExistsInput) +} + +// DeleteStream +func (c *container) DeleteStream(deleteStreamInput *v3io.DeleteStreamInput, context interface{}, responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&deleteStreamInput.DataPlaneInput) + return c.session.context.DeleteStream(deleteStreamInput, context, responseChan) +} + +// DeleteStreamSync +func (c *container) DeleteStreamSync(deleteStreamInput *v3io.DeleteStreamInput) error { + c.populateInputFields(&deleteStreamInput.DataPlaneInput) + return c.session.context.DeleteStreamSync(deleteStreamInput) +} + +// SeekShard +func (c *container) SeekShard(seekShardInput *v3io.SeekShardInput, context interface{}, responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&seekShardInput.DataPlaneInput) + return c.session.context.SeekShard(seekShardInput, context, responseChan) +} + +// SeekShardSync +func (c *container) SeekShardSync(seekShardInput *v3io.SeekShardInput) (*v3io.Response, error) { + 
c.populateInputFields(&seekShardInput.DataPlaneInput) + return c.session.context.SeekShardSync(seekShardInput) +} + +// PutRecords +func (c *container) PutRecords(putRecordsInput *v3io.PutRecordsInput, context interface{}, responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&putRecordsInput.DataPlaneInput) + return c.session.context.PutRecords(putRecordsInput, context, responseChan) +} + +// PutRecordsSync +func (c *container) PutRecordsSync(putRecordsInput *v3io.PutRecordsInput) (*v3io.Response, error) { + c.populateInputFields(&putRecordsInput.DataPlaneInput) + return c.session.context.PutRecordsSync(putRecordsInput) +} + +// GetRecords +func (c *container) GetRecords(getRecordsInput *v3io.GetRecordsInput, context interface{}, responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&getRecordsInput.DataPlaneInput) + return c.session.context.GetRecords(getRecordsInput, context, responseChan) +} + +// GetRecordsSync +func (c *container) GetRecordsSync(getRecordsInput *v3io.GetRecordsInput) (*v3io.Response, error) { + c.populateInputFields(&getRecordsInput.DataPlaneInput) + return c.session.context.GetRecordsSync(getRecordsInput) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/context.go b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/context.go new file mode 100644 index 00000000..1447c7d4 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/context.go @@ -0,0 +1,1539 @@ +package v3iohttp + +import ( + "bytes" + "crypto/tls" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net" + "net/http" + "net/url" + "path" + "reflect" + "regexp" + "strconv" + "strings" + "sync/atomic" + "time" + + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common" + "github.com/v3io/v3io-go/pkg/errors" + + "github.com/nuclio/errors" + "github.com/nuclio/logger" + "github.com/valyala/fasthttp" + "zombiezen.com/go/capnproto2" +) + +// TODO: Request should have a global pool +var requestID uint64 + +type context struct { + logger logger.Logger + requestChan chan *v3io.Request + httpClient *fasthttp.Client + clusterEndpoints []string + numWorkers int +} + +type NewClientInput struct { + TLSConfig *tls.Config + DialTimeout time.Duration + MaxConnsPerHost int +} + +func NewClient(newClientInput *NewClientInput) *fasthttp.Client { + tlsConfig := newClientInput.TLSConfig + if tlsConfig == nil { + tlsConfig = &tls.Config{InsecureSkipVerify: true} + } + + dialTimeout := newClientInput.DialTimeout + if dialTimeout == 0 { + dialTimeout = fasthttp.DefaultDialTimeout + } + dialFunction := func(addr string) (net.Conn, error) { + return fasthttp.DialTimeout(addr, dialTimeout) + } + + return &fasthttp.Client{ + TLSConfig: tlsConfig, + Dial: dialFunction, + MaxConnsPerHost: newClientInput.MaxConnsPerHost, + } +} + +func NewContext(parentLogger logger.Logger, newContextInput *NewContextInput) (v3io.Context, error) { + requestChanLen := newContextInput.RequestChanLen + if requestChanLen == 0 { + requestChanLen = 1024 + } + + numWorkers := newContextInput.NumWorkers + if numWorkers == 0 { + numWorkers = 8 + } + + httpClient := newContextInput.HTTPClient + if httpClient == nil { + httpClient = NewClient(&NewClientInput{}) + } + + newContext := &context{ + logger: parentLogger.GetChild("context.http"), + httpClient: httpClient, + requestChan: make(chan *v3io.Request, requestChanLen), + numWorkers: numWorkers, + } + + for 
workerIndex := 0; workerIndex < numWorkers; workerIndex++ { + go newContext.workerEntry(workerIndex) + } + + return newContext, nil +} + +// create a new session +func (c *context) NewSession(newSessionInput *v3io.NewSessionInput) (v3io.Session, error) { + return newSession(c.logger, + c, + newSessionInput.URL, + newSessionInput.Username, + newSessionInput.Password, + newSessionInput.AccessKey) +} + +// GetContainers +func (c *context) GetContainers(getContainersInput *v3io.GetContainersInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(getContainersInput, context, responseChan) +} + +// GetContainersSync +func (c *context) GetContainersSync(getContainersInput *v3io.GetContainersInput) (*v3io.Response, error) { + return c.sendRequestAndXMLUnmarshal( + &getContainersInput.DataPlaneInput, + http.MethodGet, + "", + "", + nil, + nil, + &v3io.GetContainersOutput{}) +} + +// GetClusterMD +func (c *context) GetClusterMD(getClusterMDInput *v3io.GetClusterMDInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(getClusterMDInput, context, responseChan) +} + +func (c *context) GetClusterMDSync(getClusterMDInput *v3io.GetClusterMDInput) (*v3io.Response, error) { + response, err := c.sendRequest(&getClusterMDInput.DataPlaneInput, + http.MethodPut, + "", + "", + getClusterMDHeaders, + nil, + false) + if err != nil { + return nil, err + } + + getClusterMDOutput := v3io.GetClusterMDOutput{} + + // unmarshal the body into an ad hoc structure + err = json.Unmarshal(response.Body(), &getClusterMDOutput) + if err != nil { + return nil, err + } + + // set the output in the response + response.Output = &getClusterMDOutput + + return response, nil +} + +// GetContainers +func (c *context) GetContainerContents(getContainerContentsInput *v3io.GetContainerContentsInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(getContainerContentsInput, context, responseChan) +} + +// GetContainerContentsSync +func (c *context) GetContainerContentsSync(getContainerContentsInput *v3io.GetContainerContentsInput) (*v3io.Response, error) { + getContainerContentOutput := v3io.GetContainerContentsOutput{} + + var queryBuilder strings.Builder + if getContainerContentsInput.Path != "" { + queryBuilder.WriteString("prefix=") + queryBuilder.WriteString(getContainerContentsInput.Path) + } + + if getContainerContentsInput.DirectoriesOnly { + queryBuilder.WriteString("&prefix-only=1") + } + + if getContainerContentsInput.GetAllAttributes { + queryBuilder.WriteString("&prefix-info=1") + } + + if getContainerContentsInput.Marker != "" { + queryBuilder.WriteString("&marker=") + queryBuilder.WriteString(getContainerContentsInput.Marker) + } + + if getContainerContentsInput.Limit > 0 { + queryBuilder.WriteString("&max-keys=") + queryBuilder.WriteString(strconv.Itoa(getContainerContentsInput.Limit)) + } + + return c.sendRequestAndXMLUnmarshal(&getContainerContentsInput.DataPlaneInput, + http.MethodGet, + "", + queryBuilder.String(), + nil, + nil, + &getContainerContentOutput) +} + +// GetItem +func (c *context) GetItem(getItemInput *v3io.GetItemInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(getItemInput, context, responseChan) +} + +type attributeValuesSection struct { + accumulatedPreviousSectionsLength int + data node_common_capnp.VnObjectAttributeValuePtr_List +} + 
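// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the vendored v3io-go
// sources): the asynchronous methods in this file (GetItem, GetItems,
// PutItem, ...) only enqueue the request on the context's request channel;
// a worker goroutine runs the matching *Sync method and delivers the result
// on the caller-supplied response channel. A minimal caller-side sketch
// (the container `c`, the table path and the attribute names are hypothetical):
//
//	responseChan := make(chan *v3io.Response, 1)
//
//	_, err := c.GetItem(&v3io.GetItemInput{
//		Path:           "my-table/some-item",
//		AttributeNames: []string{"__name", "age"},
//	}, nil, responseChan)
//	if err != nil {
//		// the request could not be enqueued
//	}
//
//	response := <-responseChan
//	if response.Error != nil {
//		// handle the error
//	} else {
//		item := response.Output.(*v3io.GetItemOutput).Item
//		_ = item
//		response.Release() // responses must be released by the caller
//	}
// ---------------------------------------------------------------------------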
+// GetItemSync +func (c *context) GetItemSync(getItemInput *v3io.GetItemInput) (*v3io.Response, error) { + + // no need to marshal, just sprintf + body := fmt.Sprintf(`{"AttributesToGet": "%s"}`, strings.Join(getItemInput.AttributeNames, ",")) + + response, err := c.sendRequest(&getItemInput.DataPlaneInput, + http.MethodPut, + getItemInput.Path, + "", + getItemHeaders, + []byte(body), + false) + + if err != nil { + return nil, err + } + + // ad hoc structure that contains response + item := struct { + Item map[string]map[string]interface{} + }{} + + // unmarshal the body + err = json.Unmarshal(response.Body(), &item) + if err != nil { + return nil, err + } + + // decode the response + attributes, err := c.decodeTypedAttributes(item.Item) + if err != nil { + return nil, err + } + + // attach the output to the response + response.Output = &v3io.GetItemOutput{Item: attributes} + + return response, nil +} + +// GetItems +func (c *context) GetItems(getItemsInput *v3io.GetItemsInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(getItemsInput, context, responseChan) +} + +// GetItemSync +func (c *context) GetItemsSync(getItemsInput *v3io.GetItemsInput) (*v3io.Response, error) { + + // create GetItem Body + body := map[string]interface{}{} + + if len(getItemsInput.AttributeNames) > 0 { + body["AttributesToGet"] = strings.Join(getItemsInput.AttributeNames, ",") + } + + if getItemsInput.TableName != "" { + body["TableName"] = getItemsInput.TableName + } + + if getItemsInput.Filter != "" { + body["FilterExpression"] = getItemsInput.Filter + } + + if getItemsInput.Marker != "" { + body["Marker"] = getItemsInput.Marker + } + + if getItemsInput.ShardingKey != "" { + body["ShardingKey"] = getItemsInput.ShardingKey + } + + if getItemsInput.Limit != 0 { + body["Limit"] = getItemsInput.Limit + } + + if getItemsInput.TotalSegments != 0 { + body["TotalSegment"] = getItemsInput.TotalSegments + body["Segment"] = getItemsInput.Segment + } + + if getItemsInput.SortKeyRangeStart != "" { + body["SortKeyRangeStart"] = getItemsInput.SortKeyRangeStart + } + + if getItemsInput.SortKeyRangeEnd != "" { + body["SortKeyRangeEnd"] = getItemsInput.SortKeyRangeEnd + } + + marshalledBody, err := json.Marshal(body) + if err != nil { + return nil, err + } + + headers := getItemsHeadersCapnp + if getItemsInput.RequestJSONResponse { + headers = getItemsHeaders + } + + response, err := c.sendRequest(&getItemsInput.DataPlaneInput, + "PUT", + getItemsInput.Path, + "", + headers, + marshalledBody, + false) + + if err != nil { + return nil, err + } + + contentType := string(response.HeaderPeek("Content-Type")) + + if contentType != "application/octet-capnp" { + c.logger.DebugWithCtx(getItemsInput.Ctx, "Body", "body", string(response.Body())) + response.Output, err = c.getItemsParseJSONResponse(response, getItemsInput) + } else { + var withWildcard bool + for _, attributeName := range getItemsInput.AttributeNames { + if attributeName == "*" || attributeName == "**" { + withWildcard = true + break + } + } + response.Output, err = c.getItemsParseCAPNPResponse(response, withWildcard) + } + return response, err +} + +// PutItem +func (c *context) PutItem(putItemInput *v3io.PutItemInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(putItemInput, context, responseChan) +} + +// PutItemSync +func (c *context) PutItemSync(putItemInput *v3io.PutItemInput) (*v3io.Response, error) { + var body 
map[string]interface{} + if putItemInput.UpdateMode != "" { + body = map[string]interface{}{ + "UpdateMode": putItemInput.UpdateMode, + } + } + + // prepare the query path + response, err := c.putItem(&putItemInput.DataPlaneInput, + putItemInput.Path, + putItemFunctionName, + putItemInput.Attributes, + putItemInput.Condition, + putItemHeaders, + body) + if err != nil { + return nil, err + } + + mtimeSecs, mtimeNSecs, err := parseMtimeHeader(response) + if err != nil { + return nil, err + } + response.Output = &v3io.PutItemOutput{MtimeSecs: mtimeSecs, MtimeNSecs: mtimeNSecs} + + return response, err +} + +// PutItems +func (c *context) PutItems(putItemsInput *v3io.PutItemsInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(putItemsInput, context, responseChan) +} + +// PutItemsSync +func (c *context) PutItemsSync(putItemsInput *v3io.PutItemsInput) (*v3io.Response, error) { + + response := c.allocateResponse() + if response == nil { + return nil, errors.New("Failed to allocate response") + } + + putItemsOutput := v3io.PutItemsOutput{ + Success: true, + } + + for itemKey, itemAttributes := range putItemsInput.Items { + + // try to post the item + _, err := c.putItem(&putItemsInput.DataPlaneInput, + putItemsInput.Path+"/"+itemKey, + putItemFunctionName, + itemAttributes, + putItemsInput.Condition, + putItemHeaders, + nil) + + // if there was an error, shove it to the list of errors + if err != nil { + + // create the map to hold the errors since at least one exists + if putItemsOutput.Errors == nil { + putItemsOutput.Errors = map[string]error{} + } + + putItemsOutput.Errors[itemKey] = err + + // clear success, since at least one error exists + putItemsOutput.Success = false + } + } + + response.Output = &putItemsOutput + + return response, nil +} + +// UpdateItem +func (c *context) UpdateItem(updateItemInput *v3io.UpdateItemInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(updateItemInput, context, responseChan) +} + +// UpdateItemSync +func (c *context) UpdateItemSync(updateItemInput *v3io.UpdateItemInput) (*v3io.Response, error) { + var err error + var response *v3io.Response + + if updateItemInput.Attributes != nil { + + // specify update mode as part of body. 
"Items" will be injected + body := map[string]interface{}{ + "UpdateMode": "CreateOrReplaceAttributes", + } + + if updateItemInput.UpdateMode != "" { + body["UpdateMode"] = updateItemInput.UpdateMode + } + + response, err = c.putItem(&updateItemInput.DataPlaneInput, + updateItemInput.Path, + putItemFunctionName, + updateItemInput.Attributes, + updateItemInput.Condition, + putItemHeaders, + body) + if err != nil { + return nil, err + } + + mtimeSecs, mtimeNSecs, err := parseMtimeHeader(response) + if err != nil { + return nil, err + } + response.Output = &v3io.UpdateItemOutput{MtimeSecs: mtimeSecs, MtimeNSecs: mtimeNSecs} + + } else if updateItemInput.Expression != nil { + + response, err = c.updateItemWithExpression(&updateItemInput.DataPlaneInput, + updateItemInput.Path, + updateItemFunctionName, + *updateItemInput.Expression, + updateItemInput.Condition, + updateItemHeaders, + updateItemInput.UpdateMode) + if err != nil { + return nil, err + } + + mtimeSecs, mtimeNSecs, err := parseMtimeHeader(response) + if err != nil { + return nil, err + } + response.Output = &v3io.UpdateItemOutput{MtimeSecs: mtimeSecs, MtimeNSecs: mtimeNSecs} + + } + + return response, err +} + +// GetObject +func (c *context) GetObject(getObjectInput *v3io.GetObjectInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(getObjectInput, context, responseChan) +} + +// GetObjectSync +func (c *context) GetObjectSync(getObjectInput *v3io.GetObjectInput) (*v3io.Response, error) { + var headers map[string]string + if getObjectInput.Offset != 0 || getObjectInput.NumBytes != 0 { + headers = make(map[string]string) + // Range header is inclusive in both 'start' and 'end', thus reducing 1 + headers["Range"] = fmt.Sprintf("bytes=%v-%v", getObjectInput.Offset, getObjectInput.Offset+getObjectInput.NumBytes-1) + } + + return c.sendRequest(&getObjectInput.DataPlaneInput, + http.MethodGet, + getObjectInput.Path, + "", + headers, + nil, + false) +} + +// PutObject +func (c *context) PutObject(putObjectInput *v3io.PutObjectInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(putObjectInput, context, responseChan) +} + +// PutObjectSync +func (c *context) PutObjectSync(putObjectInput *v3io.PutObjectInput) error { + + var headers map[string]string + if putObjectInput.Append { + headers = make(map[string]string) + headers["Range"] = "-1" + } + + _, err := c.sendRequest(&putObjectInput.DataPlaneInput, + http.MethodPut, + putObjectInput.Path, + "", + headers, + putObjectInput.Body, + true) + + return err +} + +// DeleteObject +func (c *context) DeleteObject(deleteObjectInput *v3io.DeleteObjectInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(deleteObjectInput, context, responseChan) +} + +// DeleteObjectSync +func (c *context) DeleteObjectSync(deleteObjectInput *v3io.DeleteObjectInput) error { + _, err := c.sendRequest(&deleteObjectInput.DataPlaneInput, + http.MethodDelete, + deleteObjectInput.Path, + "", + nil, + nil, + true) + + return err +} + +// CreateStream +func (c *context) CreateStream(createStreamInput *v3io.CreateStreamInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(createStreamInput, context, responseChan) +} + +// CreateStreamSync +func (c *context) CreateStreamSync(createStreamInput *v3io.CreateStreamInput) error { + body := 
fmt.Sprintf(`{"ShardCount": %d, "RetentionPeriodHours": %d}`, + createStreamInput.ShardCount, + createStreamInput.RetentionPeriodHours) + + _, err := c.sendRequest(&createStreamInput.DataPlaneInput, + http.MethodPost, + createStreamInput.Path, + "", + createStreamHeaders, + []byte(body), + true) + + return err +} + +// DescribeStream +func (c *context) DescribeStream(describeStreamInput *v3io.DescribeStreamInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(describeStreamInput, context, responseChan) +} + +// DescribeStreamSync +func (c *context) DescribeStreamSync(describeStreamInput *v3io.DescribeStreamInput) (*v3io.Response, error) { + response, err := c.sendRequest(&describeStreamInput.DataPlaneInput, + http.MethodPut, + describeStreamInput.Path, + "", + describeStreamHeaders, + nil, + false) + if err != nil { + return nil, err + } + + describeStreamOutput := v3io.DescribeStreamOutput{} + + // unmarshal the body into an ad hoc structure + err = json.Unmarshal(response.Body(), &describeStreamOutput) + if err != nil { + return nil, err + } + + // set the output in the response + response.Output = &describeStreamOutput + + return response, nil +} + +// checkPathExists +func (c *context) CheckPathExists(checkPathExistsInput *v3io.CheckPathExistsInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(checkPathExistsInput, context, responseChan) +} + +// checkPathExistsSync +func (c *context) CheckPathExistsSync(checkPathExistsInput *v3io.CheckPathExistsInput) error { + _, err := c.sendRequest(&checkPathExistsInput.DataPlaneInput, + http.MethodHead, + checkPathExistsInput.Path, + "", + nil, + nil, + true) + return err +} + +// DeleteStream +func (c *context) DeleteStream(deleteStreamInput *v3io.DeleteStreamInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(deleteStreamInput, context, responseChan) +} + +// DeleteStreamSync +func (c *context) DeleteStreamSync(deleteStreamInput *v3io.DeleteStreamInput) error { + + // get all shards in the stream + response, err := c.GetContainerContentsSync(&v3io.GetContainerContentsInput{ + DataPlaneInput: deleteStreamInput.DataPlaneInput, + Path: deleteStreamInput.Path, + }) + + if err != nil { + return err + } + + defer response.Release() + + // delete the shards one by one + // TODO: paralellize + for _, content := range response.Output.(*v3io.GetContainerContentsOutput).Contents { + + // TODO: handle error - stop deleting? return multiple errors? 
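		// Editor's note: per-shard delete errors are deliberately swallowed here (note the
		// "nolint: errcheck" below); only the final deletion of the stream's directory
		// object, at the end of this function, reports an error to the caller.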
+ c.DeleteObjectSync(&v3io.DeleteObjectInput{ // nolint: errcheck + DataPlaneInput: deleteStreamInput.DataPlaneInput, + Path: "/" + content.Key, + }) + } + + // delete the actual stream + return c.DeleteObjectSync(&v3io.DeleteObjectInput{ + DataPlaneInput: deleteStreamInput.DataPlaneInput, + Path: "/" + path.Dir(deleteStreamInput.Path) + "/", + }) +} + +// SeekShard +func (c *context) SeekShard(seekShardInput *v3io.SeekShardInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(seekShardInput, context, responseChan) +} + +// SeekShardSync +func (c *context) SeekShardSync(seekShardInput *v3io.SeekShardInput) (*v3io.Response, error) { + var buffer bytes.Buffer + + buffer.WriteString(`{"Type": "`) + buffer.WriteString(seekShardsInputTypeToString[seekShardInput.Type]) + buffer.WriteString(`"`) + + if seekShardInput.Type == v3io.SeekShardInputTypeSequence { + buffer.WriteString(`, "StartingSequenceNumber": `) + buffer.WriteString(strconv.FormatUint(seekShardInput.StartingSequenceNumber, 10)) + } else if seekShardInput.Type == v3io.SeekShardInputTypeTime { + buffer.WriteString(`, "TimestampSec": `) + buffer.WriteString(strconv.Itoa(seekShardInput.Timestamp)) + buffer.WriteString(`, "TimestampNSec": 0`) + } + + buffer.WriteString(`}`) + + response, err := c.sendRequest(&seekShardInput.DataPlaneInput, + http.MethodPut, + seekShardInput.Path, + "", + seekShardsHeaders, + buffer.Bytes(), + false) + if err != nil { + return nil, err + } + + seekShardOutput := v3io.SeekShardOutput{} + + // unmarshal the body into an ad hoc structure + err = json.Unmarshal(response.Body(), &seekShardOutput) + if err != nil { + return nil, err + } + + // set the output in the response + response.Output = &seekShardOutput + + return response, nil +} + +// PutRecords +func (c *context) PutRecords(putRecordsInput *v3io.PutRecordsInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(putRecordsInput, context, responseChan) +} + +// PutRecordsSync +func (c *context) PutRecordsSync(putRecordsInput *v3io.PutRecordsInput) (*v3io.Response, error) { + + // TODO: set this to an initial size through heuristics? 
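	// Editor's note: the manual encoding below produces a request body of the form
	//   {"Records": [{"Data": "<base64>", "ClientInfo": "<base64>", "ShardId": 0, "PartitionKey": "..."}, ...]}
	// where ClientInfo, ShardId and PartitionKey are written only when they are set on the record.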
+ // This function encodes manually + var buffer bytes.Buffer + + buffer.WriteString(`{"Records": [`) + + for recordIdx, record := range putRecordsInput.Records { + buffer.WriteString(`{"Data": "`) + buffer.WriteString(base64.StdEncoding.EncodeToString(record.Data)) + buffer.WriteString(`"`) + + if record.ClientInfo != nil { + buffer.WriteString(`,"ClientInfo": "`) + buffer.WriteString(base64.StdEncoding.EncodeToString(record.ClientInfo)) + buffer.WriteString(`"`) + } + + if record.ShardID != nil { + buffer.WriteString(`, "ShardId": `) + buffer.WriteString(strconv.Itoa(*record.ShardID)) + } + + if record.PartitionKey != "" { + buffer.WriteString(`, "PartitionKey": `) + buffer.WriteString(`"` + record.PartitionKey + `"`) + } + + // add comma if not last + if recordIdx != len(putRecordsInput.Records)-1 { + buffer.WriteString(`}, `) + } else { + buffer.WriteString(`}`) + } + } + + buffer.WriteString(`]}`) + + response, err := c.sendRequest(&putRecordsInput.DataPlaneInput, + http.MethodPost, + putRecordsInput.Path, + "", + putRecordsHeaders, + buffer.Bytes(), + false) + if err != nil { + return nil, err + } + + putRecordsOutput := v3io.PutRecordsOutput{} + + // unmarshal the body into an ad hoc structure + err = json.Unmarshal(response.Body(), &putRecordsOutput) + if err != nil { + return nil, err + } + + // set the output in the response + response.Output = &putRecordsOutput + + return response, nil +} + +// GetRecords +func (c *context) GetRecords(getRecordsInput *v3io.GetRecordsInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(getRecordsInput, context, responseChan) +} + +// GetRecordsSync +func (c *context) GetRecordsSync(getRecordsInput *v3io.GetRecordsInput) (*v3io.Response, error) { + body := fmt.Sprintf(`{"Location": "%s", "Limit": %d}`, + getRecordsInput.Location, + getRecordsInput.Limit) + + response, err := c.sendRequest(&getRecordsInput.DataPlaneInput, + http.MethodPut, + getRecordsInput.Path, + "", + getRecordsHeaders, + []byte(body), + false) + if err != nil { + return nil, err + } + + getRecordsOutput := v3io.GetRecordsOutput{} + + // unmarshal the body into an ad hoc structure + err = json.Unmarshal(response.Body(), &getRecordsOutput) + if err != nil { + return nil, err + } + + // set the output in the response + response.Output = &getRecordsOutput + + return response, nil +} + +func (c *context) putItem(dataPlaneInput *v3io.DataPlaneInput, + path string, + functionName string, + attributes map[string]interface{}, + condition string, + headers map[string]string, + body map[string]interface{}) (*v3io.Response, error) { + + // iterate over all attributes and encode them with their types + typedAttributes, err := c.encodeTypedAttributes(attributes) + if err != nil { + return nil, err + } + + // create an empty body if the user didn't pass anything + if body == nil { + body = map[string]interface{}{} + } + + // set item in body (use what the user passed as a base) + body["Item"] = typedAttributes + + if condition != "" { + body["ConditionExpression"] = condition + } + + jsonEncodedBodyContents, err := json.Marshal(body) + if err != nil { + return nil, err + } + + return c.sendRequest(dataPlaneInput, + http.MethodPut, + path, + "", + headers, + jsonEncodedBodyContents, + false) +} + +func (c *context) updateItemWithExpression(dataPlaneInput *v3io.DataPlaneInput, + path string, + functionName string, + expression string, + condition string, + headers map[string]string, + updateMode string) (*v3io.Response, error) 
{ + + body := map[string]interface{}{ + "UpdateExpression": expression, + "UpdateMode": "CreateOrReplaceAttributes", + } + + if updateMode != "" { + body["UpdateMode"] = updateMode + } + + if condition != "" { + body["ConditionExpression"] = condition + } + + jsonEncodedBodyContents, err := json.Marshal(body) + if err != nil { + return nil, err + } + + return c.sendRequest(dataPlaneInput, + http.MethodPost, + path, + "", + headers, + jsonEncodedBodyContents, + false) +} + +func (c *context) sendRequestAndXMLUnmarshal(dataPlaneInput *v3io.DataPlaneInput, + method string, + path string, + query string, + headers map[string]string, + body []byte, + output interface{}) (*v3io.Response, error) { + + response, err := c.sendRequest(dataPlaneInput, method, path, query, headers, body, false) + if err != nil { + return nil, err + } + + // unmarshal the body into the output + err = xml.Unmarshal(response.Body(), output) + if err != nil { + response.Release() + + return nil, err + } + + // set output in response + response.Output = output + + return response, nil +} + +func (c *context) sendRequest(dataPlaneInput *v3io.DataPlaneInput, + method string, + path string, + query string, + headers map[string]string, + body []byte, + releaseResponse bool) (*v3io.Response, error) { + + var success bool + var statusCode int + var err error + + if dataPlaneInput.ContainerName == "" { + return nil, errors.New("ContainerName must not be empty") + } + + request := fasthttp.AcquireRequest() + response := c.allocateResponse() + + uri, err := c.buildRequestURI(dataPlaneInput.URL, dataPlaneInput.ContainerName, query, path) + if err != nil { + return nil, err + } + uriStr := uri.String() + + // init request + request.SetRequestURI(uriStr) + request.Header.SetMethod(method) + request.SetBody(body) + + // check if we need to an an authorization header + if len(dataPlaneInput.AuthenticationToken) > 0 { + request.Header.Set("Authorization", dataPlaneInput.AuthenticationToken) + } + + if len(dataPlaneInput.AccessKey) > 0 { + request.Header.Set("X-v3io-session-key", dataPlaneInput.AccessKey) + } + + for headerName, headerValue := range headers { + request.Header.Add(headerName, headerValue) + } + + c.logger.DebugWithCtx(dataPlaneInput.Ctx, + "Tx", + "uri", uriStr, + "method", method, + "body-length", len(body)) + + if dataPlaneInput.Timeout <= 0 { + err = c.httpClient.Do(request, response.HTTPResponse) + } else { + err = c.httpClient.DoTimeout(request, response.HTTPResponse, dataPlaneInput.Timeout) + } + + if err != nil { + goto cleanup + } + + statusCode = response.HTTPResponse.StatusCode() + + { + contentLength := response.HTTPResponse.Header.ContentLength() + if contentLength < 0 { + contentLength = 0 + } + c.logger.DebugWithCtx(dataPlaneInput.Ctx, + "Rx", + "statusCode", statusCode, + "Content-Length", contentLength) + } + + // did we get a 2xx response? 
+ success = statusCode >= 200 && statusCode < 300 + + // make sure we got expected status + if !success { + var re = regexp.MustCompile(".*X-V3io-Session-Key:.*") + sanitizedRequest := re.ReplaceAllString(request.String(), "X-V3io-Session-Key: SANITIZED") + err = v3ioerrors.NewErrorWithStatusCode( + fmt.Errorf("Expected a 2xx response status code: %s\nRequest details:\n%s", + response.HTTPResponse.String(), sanitizedRequest), + statusCode) + goto cleanup + } + +cleanup: + + // we're done with the request - the response must be released by the user + // unless there's an error + fasthttp.ReleaseRequest(request) + + if err != nil { + response.Release() + return nil, err + } + + // if the user doesn't need the response, release it + if releaseResponse { + response.Release() + return nil, nil + } + + return response, nil +} + +func (c *context) buildRequestURI(urlString string, containerName string, query string, pathStr string) (*url.URL, error) { + uri, err := url.Parse(urlString) + if err != nil { + return nil, errors.Wrapf(err, "Failed to parse cluster endpoint URL %s", c.clusterEndpoints[0]) + } + uri.Path = path.Clean(path.Join("/", containerName, pathStr)) + if strings.HasSuffix(pathStr, "/") { + uri.Path += "/" // retain trailing slash + } + uri.RawQuery = strings.Replace(query, " ", "%20", -1) + return uri, nil +} + +func (c *context) allocateResponse() *v3io.Response { + return &v3io.Response{ + HTTPResponse: fasthttp.AcquireResponse(), + } +} + +// {"age": 30, "name": "foo"} -> {"age": {"N": 30}, "name": {"S": "foo"}} +func (c *context) encodeTypedAttributes(attributes map[string]interface{}) (map[string]map[string]interface{}, error) { + typedAttributes := make(map[string]map[string]interface{}) + + for attributeName, attributeValue := range attributes { + typedAttributes[attributeName] = make(map[string]interface{}) + switch value := attributeValue.(type) { + default: + return nil, fmt.Errorf("unexpected attribute type for %s: %T", attributeName, reflect.TypeOf(attributeValue)) + case int: + typedAttributes[attributeName]["N"] = strconv.Itoa(value) + case uint64: + typedAttributes[attributeName]["N"] = strconv.FormatUint(value, 10) + case int64: + typedAttributes[attributeName]["N"] = strconv.FormatInt(value, 10) + // this is a tmp bypass to the fact Go maps Json numbers to float64 + case float64: + typedAttributes[attributeName]["N"] = strconv.FormatFloat(value, 'E', -1, 64) + case string: + typedAttributes[attributeName]["S"] = value + case []byte: + typedAttributes[attributeName]["B"] = base64.StdEncoding.EncodeToString(value) + case bool: + typedAttributes[attributeName]["BOOL"] = value + case time.Time: + nanos := value.UnixNano() + typedAttributes[attributeName]["TS"] = fmt.Sprintf("%v:%v", nanos/1000000000, nanos%1000000000) + } + } + + return typedAttributes, nil +} + +// {"age": {"N": 30}, "name": {"S": "foo"}} -> {"age": 30, "name": "foo"} +func (c *context) decodeTypedAttributes(typedAttributes map[string]map[string]interface{}) (map[string]interface{}, error) { + var err error + attributes := map[string]interface{}{} + + for attributeName, typedAttributeValue := range typedAttributes { + + typeError := func(attributeName string, attributeType string, value interface{}) error { + return errors.Errorf("Stated attribute type '%s' for attribute '%s' did not match actual attribute type '%T'", attributeType, attributeName, value) + } + + // try to parse as number + if value, ok := typedAttributeValue["N"]; ok { + numberValue, ok := value.(string) + if !ok { + return nil, 
typeError(attributeName, "N", value) + } + + // try int + if intValue, err := strconv.Atoi(numberValue); err != nil { + + // try float + floatValue, err := strconv.ParseFloat(numberValue, 64) + if err != nil { + return nil, fmt.Errorf("value for %s is not int or float: %s", attributeName, numberValue) + } + + // save as float + attributes[attributeName] = floatValue + } else { + attributes[attributeName] = intValue + } + } else if value, ok := typedAttributeValue["S"]; ok { + stringValue, ok := value.(string) + if !ok { + return nil, typeError(attributeName, "S", value) + } + + attributes[attributeName] = stringValue + } else if value, ok := typedAttributeValue["B"]; ok { + byteSliceValue, ok := value.(string) + if !ok { + return nil, typeError(attributeName, "B", value) + } + + attributes[attributeName], err = base64.StdEncoding.DecodeString(byteSliceValue) + if err != nil { + return nil, err + } + } else if value, ok := typedAttributeValue["BOOL"]; ok { + boolValue, ok := value.(bool) + if !ok { + return nil, typeError(attributeName, "BOOL", value) + } + + attributes[attributeName] = boolValue + } else if value, ok := typedAttributeValue["TS"]; ok { + timestampValue, ok := value.(string) + if !ok { + return nil, typeError(attributeName, "TS", value) + } + + timeParts := strings.Split(timestampValue, ":") + if len(timeParts) != 2 { + return nil, fmt.Errorf("incorrect format of timestamp value: %v", timestampValue) + } + + seconds, err := strconv.ParseInt(timeParts[0], 10, 64) + if err != nil { + return nil, err + } + nanos, err := strconv.ParseInt(timeParts[1], 10, 64) + if err != nil { + return nil, err + } + timeValue := time.Unix(seconds, nanos) + + attributes[attributeName] = timeValue + } + } + + return attributes, nil +} + +func (c *context) sendRequestToWorker(input interface{}, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + id := atomic.AddUint64(&requestID, 1) + + // create a request/response (TODO: from pool) + requestResponse := &v3io.RequestResponse{ + Request: v3io.Request{ + ID: id, + Input: input, + Context: context, + ResponseChan: responseChan, + SendTimeNanoseconds: time.Now().UnixNano(), + }, + } + + // point to container + requestResponse.Request.RequestResponse = requestResponse + + // send the request to the request channel + c.requestChan <- &requestResponse.Request + + return &requestResponse.Request, nil +} + +func (c *context) workerEntry(workerIndex int) { + for { + var response *v3io.Response + var err error + + // read a request + request := <-c.requestChan + + // according to the input type + switch typedInput := request.Input.(type) { + case *v3io.PutObjectInput: + err = c.PutObjectSync(typedInput) + case *v3io.GetObjectInput: + response, err = c.GetObjectSync(typedInput) + case *v3io.DeleteObjectInput: + err = c.DeleteObjectSync(typedInput) + case *v3io.GetItemInput: + response, err = c.GetItemSync(typedInput) + case *v3io.GetItemsInput: + response, err = c.GetItemsSync(typedInput) + case *v3io.PutItemInput: + response, err = c.PutItemSync(typedInput) + case *v3io.PutItemsInput: + response, err = c.PutItemsSync(typedInput) + case *v3io.UpdateItemInput: + response, err = c.UpdateItemSync(typedInput) + case *v3io.CreateStreamInput: + err = c.CreateStreamSync(typedInput) + case *v3io.DescribeStreamInput: + response, err = c.DescribeStreamSync(typedInput) + case *v3io.DeleteStreamInput: + err = c.DeleteStreamSync(typedInput) + case *v3io.GetRecordsInput: + response, err = c.GetRecordsSync(typedInput) + case 
*v3io.PutRecordsInput: + response, err = c.PutRecordsSync(typedInput) + case *v3io.SeekShardInput: + response, err = c.SeekShardSync(typedInput) + case *v3io.GetContainersInput: + response, err = c.GetContainersSync(typedInput) + case *v3io.GetContainerContentsInput: + response, err = c.GetContainerContentsSync(typedInput) + case *v3io.GetClusterMDInput: + response, err = c.GetClusterMDSync(typedInput) + case *v3io.CheckPathExistsInput: + err = c.CheckPathExistsSync(typedInput) + default: + c.logger.ErrorWith("Got unexpected request type", "type", reflect.TypeOf(request.Input).String()) + } + + // TODO: have the sync interfaces somehow use the pre-allocated response + if response != nil { + request.RequestResponse.Response = *response + } + + response = &request.RequestResponse.Response + + response.ID = request.ID + response.Error = err + response.RequestResponse = request.RequestResponse + response.Context = request.Context + + // write to response channel + request.ResponseChan <- &request.RequestResponse.Response + } +} + +func readAllCapnpMessages(reader io.Reader) []*capnp.Message { + var capnpMessages []*capnp.Message + for { + msg, err := capnp.NewDecoder(reader).Decode() + if err != nil { + break + } + capnpMessages = append(capnpMessages, msg) + } + return capnpMessages +} + +func getSectionAndIndex(values []attributeValuesSection, idx int) (section int, resIdx int) { + if len(values) == 1 { + return 0, idx + } + if idx < values[0].accumulatedPreviousSectionsLength { + return 0, idx + } + for i := 1; i < len(values); i++ { + if values[i].accumulatedPreviousSectionsLength > idx { + return i, idx - values[i-1].accumulatedPreviousSectionsLength + } + } + return 0, idx +} + +func decodeCapnpAttributes(keyValues node_common_capnp.VnObjectItemsGetMappedKeyValuePair_List, values []attributeValuesSection, attributeNames []string) (map[string]interface{}, error) { + attributes := map[string]interface{}{} + for j := 0; j < keyValues.Len(); j++ { + attrPtr := keyValues.At(j) + valIdx := int(attrPtr.ValueMapIndex()) + attrIdx := int(attrPtr.KeyMapIndex()) + + attributeName := attributeNames[attrIdx] + sectIdx, valIdx := getSectionAndIndex(values, valIdx) + value, err := values[sectIdx].data.At(valIdx).Value() + if err != nil { + return attributes, errors.Wrapf(err, "values[%d].data.At(%d).Value", sectIdx, valIdx) + } + switch value.Which() { + case node_common_capnp.ExtAttrValue_Which_qword: + attributes[attributeName] = int(value.Qword()) + case node_common_capnp.ExtAttrValue_Which_uqword: + attributes[attributeName] = int(value.Uqword()) + case node_common_capnp.ExtAttrValue_Which_blob: + attributes[attributeName], err = value.Blob() + if err != nil { + return attributes, errors.Wrapf(err, "unable to get value of BLOB attribute '%s'", attributeName) + } + case node_common_capnp.ExtAttrValue_Which_str: + attributes[attributeName], err = value.Str() + if err != nil { + return attributes, errors.Wrapf(err, "unable to get value of String attribute '%s'", attributeName) + } + case node_common_capnp.ExtAttrValue_Which_dfloat: + attributes[attributeName] = value.Dfloat() + case node_common_capnp.ExtAttrValue_Which_boolean: + attributes[attributeName] = value.Boolean() + case node_common_capnp.ExtAttrValue_Which_time: + t, err := value.Time() + if err != nil { + return nil, err + } + attributes[attributeName] = time.Unix(t.TvSec(), t.TvNsec()) + case node_common_capnp.ExtAttrValue_Which_notExists: + continue // skip + default: + return attributes, errors.Errorf("getItemsCapnp: %s type for %s 
attribute is not expected", value.Which().String(), attributeName) + } + } + return attributes, nil +} + +func (c *context) getItemsParseJSONResponse(response *v3io.Response, getItemsInput *v3io.GetItemsInput) (*v3io.GetItemsOutput, error) { + + getItemsResponse := struct { + Items []map[string]map[string]interface{} + NextMarker string + LastItemIncluded string + }{} + + // unmarshal the body into an ad hoc structure + err := json.Unmarshal(response.Body(), &getItemsResponse) + if err != nil { + return nil, err + } + + //validate getItems response to avoid infinite loop + if getItemsResponse.LastItemIncluded != "TRUE" && (getItemsResponse.NextMarker == "" || getItemsResponse.NextMarker == getItemsInput.Marker) { + errMsg := fmt.Sprintf("Invalid getItems response: lastItemIncluded=false and nextMarker='%s', "+ + "startMarker='%s', probably due to object size bigger than 2M. Query is: %+v", getItemsResponse.NextMarker, getItemsInput.Marker, getItemsInput) + c.logger.Warn(errMsg) + } + + getItemsOutput := v3io.GetItemsOutput{ + NextMarker: getItemsResponse.NextMarker, + Last: getItemsResponse.LastItemIncluded == "TRUE", + } + + // iterate through the items and decode them + for _, typedItem := range getItemsResponse.Items { + + item, err := c.decodeTypedAttributes(typedItem) + if err != nil { + return nil, err + } + + getItemsOutput.Items = append(getItemsOutput.Items, item) + } + // attach the output to the response + return &getItemsOutput, nil +} + +func (c *context) getItemsParseCAPNPResponse(response *v3io.Response, withWildcard bool) (*v3io.GetItemsOutput, error) { + responseBodyReader := bytes.NewReader(response.Body()) + capnpSections := readAllCapnpMessages(responseBodyReader) + if len(capnpSections) < 2 { + return nil, errors.Errorf("getItemsCapnp: Got only %v capnp sections. Expecting at least 2", len(capnpSections)) + } + cookie := string(response.HeaderPeek("X-v3io-cookie")) + getItemsOutput := v3io.GetItemsOutput{ + NextMarker: cookie, + Last: len(cookie) == 0, + } + if len(capnpSections) < 2 { + return nil, errors.Errorf("getItemsCapnp: Got only %v capnp sections. 
Expecting at least 2", len(capnpSections)) + } + + metadataPayload, err := node_common_capnp.ReadRootVnObjectItemsGetResponseMetadataPayload(capnpSections[len(capnpSections)-1]) + if err != nil { + return nil, errors.Wrap(err, "ReadRootVnObjectItemsGetResponseMetadataPayload") + } + //Keys + attributeMap, err := metadataPayload.KeyMap() + if err != nil { + return nil, errors.Wrap(err, "metadataPayload.KeyMap") + } + attributeMapNames, err := attributeMap.Names() + if err != nil { + return nil, errors.Wrap(err, "attributeMap.Names") + } + attributeNamesPtr, err := attributeMapNames.Arr() + if err != nil { + return nil, errors.Wrap(err, "attributeMapNames.Arr") + } + //Values + valueMap, err := metadataPayload.ValueMap() + if err != nil { + return nil, errors.Wrap(err, "metadataPayload.ValueMap") + } + values, err := valueMap.Values() + if err != nil { + return nil, errors.Wrap(err, "valueMap.Values") + } + + // Items + items, err := metadataPayload.Items() + if err != nil { + return nil, errors.Wrap(err, "metadataPayload.Items") + } + valuesSections := make([]attributeValuesSection, len(capnpSections)-1) + + accLength := 0 + //Additional data sections "in between" + for capnpSectionIndex := 1; capnpSectionIndex < len(capnpSections)-1; capnpSectionIndex++ { + data, err := node_common_capnp.ReadRootVnObjectItemsGetResponseDataPayload(capnpSections[capnpSectionIndex]) + if err != nil { + return nil, errors.Wrap(err, "node_common_capnp.ReadRootVnObjectAttributeValueMap") + } + dvmap, err := data.ValueMap() + if err != nil { + return nil, errors.Wrap(err, "data.ValueMap") + } + dv, err := dvmap.Values() + if err != nil { + return nil, errors.Wrap(err, "data.ValueMap.Values") + } + accLength = accLength + dv.Len() + valuesSections[capnpSectionIndex-1].data = dv + valuesSections[capnpSectionIndex-1].accumulatedPreviousSectionsLength = accLength + } + accLength = accLength + values.Len() + valuesSections[len(capnpSections)-2].data = values + valuesSections[len(capnpSections)-2].accumulatedPreviousSectionsLength = accLength + + //Read in all the attribute names + attributeNamesNumber := attributeNamesPtr.Len() + attributeNames := make([]string, attributeNamesNumber) + for attributeIndex := 0; attributeIndex < attributeNamesNumber; attributeIndex++ { + attributeNames[attributeIndex], err = attributeNamesPtr.At(attributeIndex).Str() + if err != nil { + return nil, errors.Wrapf(err, "attributeNamesPtr.At(%d) size %d", attributeIndex, attributeNamesNumber) + } + } + + // iterate through the items and decode them + for itemIndex := 0; itemIndex < items.Len(); itemIndex++ { + itemPtr := items.At(itemIndex) + item, err := itemPtr.Item() + if err != nil { + return nil, errors.Wrap(err, "itemPtr.Item") + } + itemAttributes, err := item.Attrs() + if err != nil { + return nil, errors.Wrap(err, "item.Attrs") + } + ditem, err := decodeCapnpAttributes(itemAttributes, valuesSections, attributeNames) + if err != nil { + return nil, errors.Wrap(err, "decodeCapnpAttributes") + } + if withWildcard { + name, err := item.Name() + if err != nil { + return nil, errors.Wrap(err, "item.Name") + } + ditem["__name"] = name + } + getItemsOutput.Items = append(getItemsOutput.Items, ditem) + } + return &getItemsOutput, nil +} + +// parsing the mtime from a header of the form `__mtime_secs==1581605100 and __mtime_nsecs==498349956` +func parseMtimeHeader(response *v3io.Response) (int, int, error) { + var mtimeSecs, mtimeNSecs int + var err error + + mtimeHeader := string(response.HeaderPeek("X-v3io-transaction-verifier")) + for _, 
expression := range strings.Split(mtimeHeader, "and") { + mtimeParts := strings.Split(expression, "==") + mtimeType := strings.TrimSpace(mtimeParts[0]) + if mtimeType == "__mtime_secs" { + mtimeSecs, err = trimAndParseInt(mtimeParts[1]) + if err != nil { + return 0, 0, err + } + } else if mtimeType == "__mtime_nsecs" { + mtimeNSecs, err = trimAndParseInt(mtimeParts[1]) + if err != nil { + return 0, 0, err + } + } else { + return 0, 0, fmt.Errorf("failed to parse 'X-v3io-transaction-verifier', unexpected symbol '%v' ", mtimeType) + } + } + + return mtimeSecs, mtimeNSecs, nil +} + +func trimAndParseInt(str string) (int, error) { + trimmed := strings.TrimSpace(str) + return strconv.Atoi(trimmed) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/headers.go b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/headers.go new file mode 100644 index 00000000..035eb095 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/headers.go @@ -0,0 +1,90 @@ +package v3iohttp + +// function names +const ( + putItemFunctionName = "PutItem" + updateItemFunctionName = "UpdateItem" + getItemFunctionName = "GetItem" + getItemsFunctionName = "GetItems" + createStreamFunctionName = "CreateStream" + describeStreamFunctionName = "DescribeStream" + putRecordsFunctionName = "PutRecords" + getRecordsFunctionName = "GetRecords" + seekShardsFunctionName = "SeekShard" + getClusterMDFunctionName = "GetClusterMD" +) + +// headers for put item +var putItemHeaders = map[string]string{ + "Content-Type": "application/json", + "X-v3io-function": putItemFunctionName, +} + +// headers for GetClusterMD +var getClusterMDHeaders = map[string]string{ + "Content-Type": "application/json", + "X-v3io-function": getClusterMDFunctionName, +} + +// headers for update item +var updateItemHeaders = map[string]string{ + "Content-Type": "application/json", + "X-v3io-function": updateItemFunctionName, +} + +// headers for update item +var getItemHeaders = map[string]string{ + "Content-Type": "application/json", + "X-v3io-function": getItemFunctionName, +} + +// headers for get items +var getItemsHeaders = map[string]string{ + "Content-Type": "application/json", + "X-v3io-function": getItemsFunctionName, +} + +// headers for get items requesting captain-proto response +var getItemsHeadersCapnp = map[string]string{ + "Content-Type": "application/json", + "X-v3io-function": getItemsFunctionName, + "X-v3io-response-content-type": "capnp", +} + +// headers for create stream +var createStreamHeaders = map[string]string{ + "Content-Type": "application/json", + "X-v3io-function": createStreamFunctionName, +} + +// headers for get records +var describeStreamHeaders = map[string]string{ + "Content-Type": "application/json", + "X-v3io-function": describeStreamFunctionName, +} + +// headers for put records +var putRecordsHeaders = map[string]string{ + "Content-Type": "application/json", + "X-v3io-function": putRecordsFunctionName, +} + +// headers for put records +var getRecordsHeaders = map[string]string{ + "Content-Type": "application/json", + "X-v3io-function": getRecordsFunctionName, +} + +// headers for seek records +var seekShardsHeaders = map[string]string{ + "Content-Type": "application/json", + "X-v3io-function": seekShardsFunctionName, +} + +// map between SeekShardInputType and its encoded counterpart +var seekShardsInputTypeToString = [...]string{ + "TIME", + "SEQUENCE", + "LATEST", + "EARLIEST", +} diff --git 
a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/session.go b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/session.go new file mode 100644 index 00000000..61455de3 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/session.go @@ -0,0 +1,53 @@ +package v3iohttp + +import ( + "encoding/base64" + "fmt" + + "github.com/v3io/v3io-go/pkg/dataplane" + + "github.com/nuclio/logger" +) + +type session struct { + logger logger.Logger + context *context + url string + authenticationToken string + accessKey string +} + +func newSession(parentLogger logger.Logger, + context *context, + url string, + username string, + password string, + accessKey string) (v3io.Session, error) { + + authenticationToken := "" + if username != "" && password != "" && accessKey == "" { + authenticationToken = GenerateAuthenticationToken(username, password) + } + + return &session{ + logger: parentLogger.GetChild("session"), + context: context, + url: url, + authenticationToken: authenticationToken, + accessKey: accessKey, + }, nil +} + +// NewContainer creates a container +func (s *session) NewContainer(newContainerInput *v3io.NewContainerInput) (v3io.Container, error) { + return newContainer(s.logger, s, newContainerInput.ContainerName) +} + +func GenerateAuthenticationToken(username string, password string) string { + + // generate token for basic authentication + usernameAndPassword := fmt.Sprintf("%s:%s", username, password) + encodedUsernameAndPassword := base64.StdEncoding.EncodeToString([]byte(usernameAndPassword)) + + return "Basic " + encodedUsernameAndPassword +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/types.go b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/types.go new file mode 100644 index 00000000..09830ff1 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/types.go @@ -0,0 +1,9 @@ +package v3iohttp + +import "github.com/valyala/fasthttp" + +type NewContextInput struct { + HTTPClient *fasthttp.Client + NumWorkers int + RequestChanLen int +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/item.go b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/item.go new file mode 100644 index 00000000..5bce099f --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/item.go @@ -0,0 +1,82 @@ +/* +Copyright 2018 The v3io Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v3io + +import ( + "strconv" + + "github.com/v3io/v3io-go/pkg/errors" +) + +type Item map[string]interface{} + +func (i Item) GetField(name string) interface{} { + return i[name] +} + +func (i Item) GetFieldInt(name string) (int, error) { + fieldValue, fieldFound := i[name] + if !fieldFound { + return 0, v3ioerrors.ErrNotFound + } + + switch typedField := fieldValue.(type) { + case int: + return typedField, nil + case float64: + return int(typedField), nil + case string: + return strconv.Atoi(typedField) + default: + return 0, v3ioerrors.ErrInvalidTypeConversion + } +} + +func (i Item) GetFieldString(name string) (string, error) { + fieldValue, fieldFound := i[name] + if !fieldFound { + return "", v3ioerrors.ErrNotFound + } + + switch typedField := fieldValue.(type) { + case int: + return strconv.Itoa(typedField), nil + case float64: + return strconv.FormatFloat(typedField, 'E', -1, 64), nil + case string: + return typedField, nil + default: + return "", v3ioerrors.ErrInvalidTypeConversion + } +} + +func (i Item) GetFieldUint64(name string) (uint64, error) { + fieldValue, fieldFound := i[name] + if !fieldFound { + return 0, v3ioerrors.ErrNotFound + } + + switch typedField := fieldValue.(type) { + // TODO: properly handle uint64 + case int: + return uint64(typedField), nil + case uint64: + return typedField, nil + default: + return 0, v3ioerrors.ErrInvalidTypeConversion + } +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/itemscursor.go b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/itemscursor.go new file mode 100644 index 00000000..c2e8e4f0 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/itemscursor.go @@ -0,0 +1,137 @@ +package v3io + +type ItemsCursor struct { + currentItem Item + currentError error + currentResponse *Response + nextMarker string + moreItemsExist bool + itemIndex int + items []Item + getItemsInput *GetItemsInput + container Container +} + +func NewItemsCursor(container Container, getItemsInput *GetItemsInput) (*ItemsCursor, error) { + newItemsCursor := &ItemsCursor{ + container: container, + getItemsInput: getItemsInput, + } + + response, err := container.GetItemsSync(getItemsInput) + if err != nil { + return nil, err + } + + newItemsCursor.setResponse(response) + + return newItemsCursor, nil +} + +// Err returns the last error +func (ic *ItemsCursor) Err() error { + return ic.currentError +} + +// Release releases a cursor and its underlying resources +func (ic *ItemsCursor) Release() { + if ic.currentResponse != nil { + ic.currentResponse.Release() + } +} + +// Next gets the next matching item. this may potentially block as this lazy loads items from the collection +func (ic *ItemsCursor) NextSync() bool { + item, err := ic.NextItemSync() + + if item == nil || err != nil { + return false + } + + return true +} + +// NextItem gets the next matching item. this may potentially block as this lazy loads items from the collection +func (ic *ItemsCursor) NextItemSync() (Item, error) { + + // are there any more items left in the previous response we received? + if ic.itemIndex < len(ic.items) { + ic.currentItem = ic.items[ic.itemIndex] + ic.currentError = nil + + // next time we'll give next item + ic.itemIndex++ + + return ic.currentItem, nil + } + + // are there any more items up stream? 
+ if !ic.moreItemsExist { + ic.currentError = nil + return nil, nil + } + + // get the previous request input and modify it with the marker + ic.getItemsInput.Marker = ic.nextMarker + + // invoke get items + newResponse, err := ic.container.GetItemsSync(ic.getItemsInput) + if err != nil { + return nil, err + } + + // release the previous response + ic.currentResponse.Release() + + // set the new response - read all the sub information from it + ic.setResponse(newResponse) + + // and recurse into next now that we repopulated response + return ic.NextItemSync() +} + +// gets all items +func (ic *ItemsCursor) AllSync() ([]Item, error) { + var items []Item + + for ic.NextSync() { + items = append(items, ic.GetItem()) + } + + if ic.Err() != nil { + return nil, ic.Err() + } + + return items, nil +} + +func (ic *ItemsCursor) GetField(name string) interface{} { + return ic.currentItem[name] +} + +func (ic *ItemsCursor) GetFieldInt(name string) (int, error) { + return ic.currentItem.GetFieldInt(name) +} + +func (ic *ItemsCursor) GetFieldString(name string) (string, error) { + return ic.currentItem.GetFieldString(name) +} + +func (ic *ItemsCursor) GetFields() map[string]interface{} { + return ic.currentItem +} + +func (ic *ItemsCursor) GetItem() Item { + return ic.currentItem +} + +func (ic *ItemsCursor) setResponse(response *Response) { + ic.currentResponse = response + + getItemsOutput := response.Output.(*GetItemsOutput) + + ic.moreItemsExist = !getItemsOutput.Last + ic.nextMarker = getItemsOutput.NextMarker + ic.items = getItemsOutput.Items + ic.itemIndex = 0 +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/requestresponse.go b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/requestresponse.go new file mode 100644 index 00000000..aba012ef --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/requestresponse.go @@ -0,0 +1,83 @@ +/* +Copyright 2018 The v3io Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v3io + +import "github.com/valyala/fasthttp" + +type Request struct { + ID uint64 + + // holds the input (e.g. 
ListBucketInput, GetItemInput) + Input interface{} + + // a user supplied context + Context interface{} + + // the channel to which the response must be posted + ResponseChan chan *Response + + // pointer to container + RequestResponse *RequestResponse + + // Request time + SendTimeNanoseconds int64 +} + +type Response struct { + + // hold a decoded output, if any + Output interface{} + + // Equal to the ID of request + ID uint64 + + // holds the error for async responses + Error error + + // a user supplied context + Context interface{} + + // pointer to container + RequestResponse *RequestResponse + + // HTTP + HTTPResponse *fasthttp.Response +} + +func (r *Response) Release() { + if r.HTTPResponse != nil { + fasthttp.ReleaseResponse(r.HTTPResponse) + } +} + +func (r *Response) Body() []byte { + return r.HTTPResponse.Body() +} + +func (r *Response) HeaderPeek(key string) []byte { + return r.HTTPResponse.Header.Peek(key) +} + +func (r *Response) Request() *Request { + return &r.RequestResponse.Request +} + +// holds both a request and response +type RequestResponse struct { + Request Request + Response Response +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/ExtAttrValue.capnp b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/ExtAttrValue.capnp new file mode 100644 index 00000000..16544d70 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/ExtAttrValue.capnp @@ -0,0 +1,25 @@ +@0x8a6e4e6e3e2db81e; +using Go = import "/go.capnp"; +$Go.package("node_common_capnp"); +$Go.import("github.com/v3io/v3io-go/internal/schemas/node/common"); + +using Java = import "/java/java.capnp"; +$Java.package("io.iguaz.v3io.daemon.client.api.capnp"); +$Java.outerClassname("ExtAttrValueOuter"); + +using import "/node/common/TimeSpec.capnp".TimeSpec; + +struct ExtAttrValue{ + union { + qword @0 : Int64; + uqword @1 : UInt64; + blob @2 : Data; + notExists @3 : Void; + str @4 : Text; + qwordIncrement @5 : Int64; + time @6 : TimeSpec; + dfloat @7 : Float64; + floatIncrement @8 : Float64; + boolean @9 : Bool; + } +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/ExtAttrValue.capnp.go b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/ExtAttrValue.capnp.go new file mode 100644 index 00000000..0c14740b --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/ExtAttrValue.capnp.go @@ -0,0 +1,300 @@ +// Code generated by capnpc-go. DO NOT EDIT. 
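Taken together, Item, ItemsCursor and the Request/Response types above form the synchronous read path: GetItemsSync returns a single page plus a continuation marker, and the cursor hides the marker bookkeeping and the response lifetime. The following is a minimal consumption sketch, not part of the vendored sources: the container is assumed to come from session.NewContainer (shown earlier), the GetItemsInput is taken as given, and "__name" is only an example attribute name.

package example

import (
	"fmt"

	v3io "github.com/v3io/v3io-go/pkg/dataplane"
)

// iterateItems walks every item matched by getItemsInput and releases the
// cursor's underlying HTTP response when done.
func iterateItems(container v3io.Container, getItemsInput *v3io.GetItemsInput) error {
	cursor, err := v3io.NewItemsCursor(container, getItemsInput)
	if err != nil {
		return err
	}
	defer cursor.Release()

	// NextSync lazily fetches the next page (via the marker) once the current
	// page is exhausted
	for cursor.NextSync() {
		item := cursor.GetItem()

		// the typed getters on Item coerce the raw interface{} values
		name, err := item.GetFieldString("__name")
		if err != nil {
			return err
		}
		fmt.Println("item:", name)
	}

	return cursor.Err()
}

AllSync performs the same loop internally and returns the accumulated items in one slice.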
+ +package node_common_capnp + +import ( + math "math" + strconv "strconv" + capnp "zombiezen.com/go/capnproto2" + text "zombiezen.com/go/capnproto2/encoding/text" + schemas "zombiezen.com/go/capnproto2/schemas" +) + +type ExtAttrValue struct{ capnp.Struct } +type ExtAttrValue_Which uint16 + +const ( + ExtAttrValue_Which_qword ExtAttrValue_Which = 0 + ExtAttrValue_Which_uqword ExtAttrValue_Which = 1 + ExtAttrValue_Which_blob ExtAttrValue_Which = 2 + ExtAttrValue_Which_notExists ExtAttrValue_Which = 3 + ExtAttrValue_Which_str ExtAttrValue_Which = 4 + ExtAttrValue_Which_qwordIncrement ExtAttrValue_Which = 5 + ExtAttrValue_Which_time ExtAttrValue_Which = 6 + ExtAttrValue_Which_dfloat ExtAttrValue_Which = 7 + ExtAttrValue_Which_floatIncrement ExtAttrValue_Which = 8 + ExtAttrValue_Which_boolean ExtAttrValue_Which = 9 +) + +func (w ExtAttrValue_Which) String() string { + const s = "qworduqwordblobnotExistsstrqwordIncrementtimedfloatfloatIncrementboolean" + switch w { + case ExtAttrValue_Which_qword: + return s[0:5] + case ExtAttrValue_Which_uqword: + return s[5:11] + case ExtAttrValue_Which_blob: + return s[11:15] + case ExtAttrValue_Which_notExists: + return s[15:24] + case ExtAttrValue_Which_str: + return s[24:27] + case ExtAttrValue_Which_qwordIncrement: + return s[27:41] + case ExtAttrValue_Which_time: + return s[41:45] + case ExtAttrValue_Which_dfloat: + return s[45:51] + case ExtAttrValue_Which_floatIncrement: + return s[51:65] + case ExtAttrValue_Which_boolean: + return s[65:72] + + } + return "ExtAttrValue_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +// ExtAttrValue_TypeID is the unique identifier for the type ExtAttrValue. +const ExtAttrValue_TypeID = 0x9bb0f31edcf7bd65 + +func NewExtAttrValue(s *capnp.Segment) (ExtAttrValue, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 1}) + return ExtAttrValue{st}, err +} + +func NewRootExtAttrValue(s *capnp.Segment) (ExtAttrValue, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 1}) + return ExtAttrValue{st}, err +} + +func ReadRootExtAttrValue(msg *capnp.Message) (ExtAttrValue, error) { + root, err := msg.RootPtr() + return ExtAttrValue{root.Struct()}, err +} + +func (s ExtAttrValue) String() string { + str, _ := text.Marshal(0x9bb0f31edcf7bd65, s.Struct) + return str +} + +func (s ExtAttrValue) Which() ExtAttrValue_Which { + return ExtAttrValue_Which(s.Struct.Uint16(8)) +} +func (s ExtAttrValue) Qword() int64 { + if s.Struct.Uint16(8) != 0 { + panic("Which() != qword") + } + return int64(s.Struct.Uint64(0)) +} + +func (s ExtAttrValue) SetQword(v int64) { + s.Struct.SetUint16(8, 0) + s.Struct.SetUint64(0, uint64(v)) +} + +func (s ExtAttrValue) Uqword() uint64 { + if s.Struct.Uint16(8) != 1 { + panic("Which() != uqword") + } + return s.Struct.Uint64(0) +} + +func (s ExtAttrValue) SetUqword(v uint64) { + s.Struct.SetUint16(8, 1) + s.Struct.SetUint64(0, v) +} + +func (s ExtAttrValue) Blob() ([]byte, error) { + if s.Struct.Uint16(8) != 2 { + panic("Which() != blob") + } + p, err := s.Struct.Ptr(0) + return []byte(p.Data()), err +} + +func (s ExtAttrValue) HasBlob() bool { + if s.Struct.Uint16(8) != 2 { + return false + } + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s ExtAttrValue) SetBlob(v []byte) error { + s.Struct.SetUint16(8, 2) + return s.Struct.SetData(0, v) +} + +func (s ExtAttrValue) SetNotExists() { + s.Struct.SetUint16(8, 3) + +} + +func (s ExtAttrValue) Str() (string, error) { + if s.Struct.Uint16(8) != 4 { + 
panic("Which() != str") + } + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s ExtAttrValue) HasStr() bool { + if s.Struct.Uint16(8) != 4 { + return false + } + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s ExtAttrValue) StrBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s ExtAttrValue) SetStr(v string) error { + s.Struct.SetUint16(8, 4) + return s.Struct.SetText(0, v) +} + +func (s ExtAttrValue) QwordIncrement() int64 { + if s.Struct.Uint16(8) != 5 { + panic("Which() != qwordIncrement") + } + return int64(s.Struct.Uint64(0)) +} + +func (s ExtAttrValue) SetQwordIncrement(v int64) { + s.Struct.SetUint16(8, 5) + s.Struct.SetUint64(0, uint64(v)) +} + +func (s ExtAttrValue) Time() (TimeSpec, error) { + if s.Struct.Uint16(8) != 6 { + panic("Which() != time") + } + p, err := s.Struct.Ptr(0) + return TimeSpec{Struct: p.Struct()}, err +} + +func (s ExtAttrValue) HasTime() bool { + if s.Struct.Uint16(8) != 6 { + return false + } + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s ExtAttrValue) SetTime(v TimeSpec) error { + s.Struct.SetUint16(8, 6) + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewTime sets the time field to a newly +// allocated TimeSpec struct, preferring placement in s's segment. +func (s ExtAttrValue) NewTime() (TimeSpec, error) { + s.Struct.SetUint16(8, 6) + ss, err := NewTimeSpec(s.Struct.Segment()) + if err != nil { + return TimeSpec{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +func (s ExtAttrValue) Dfloat() float64 { + if s.Struct.Uint16(8) != 7 { + panic("Which() != dfloat") + } + return math.Float64frombits(s.Struct.Uint64(0)) +} + +func (s ExtAttrValue) SetDfloat(v float64) { + s.Struct.SetUint16(8, 7) + s.Struct.SetUint64(0, math.Float64bits(v)) +} + +func (s ExtAttrValue) FloatIncrement() float64 { + if s.Struct.Uint16(8) != 8 { + panic("Which() != floatIncrement") + } + return math.Float64frombits(s.Struct.Uint64(0)) +} + +func (s ExtAttrValue) SetFloatIncrement(v float64) { + s.Struct.SetUint16(8, 8) + s.Struct.SetUint64(0, math.Float64bits(v)) +} + +func (s ExtAttrValue) Boolean() bool { + if s.Struct.Uint16(8) != 9 { + panic("Which() != boolean") + } + return s.Struct.Bit(0) +} + +func (s ExtAttrValue) SetBoolean(v bool) { + s.Struct.SetUint16(8, 9) + s.Struct.SetBit(0, v) +} + +// ExtAttrValue_List is a list of ExtAttrValue. +type ExtAttrValue_List struct{ capnp.List } + +// NewExtAttrValue creates a new list of ExtAttrValue. +func NewExtAttrValue_List(s *capnp.Segment, sz int32) (ExtAttrValue_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 16, PointerCount: 1}, sz) + return ExtAttrValue_List{l}, err +} + +func (s ExtAttrValue_List) At(i int) ExtAttrValue { return ExtAttrValue{s.List.Struct(i)} } + +func (s ExtAttrValue_List) Set(i int, v ExtAttrValue) error { return s.List.SetStruct(i, v.Struct) } + +func (s ExtAttrValue_List) String() string { + str, _ := text.MarshalList(0x9bb0f31edcf7bd65, s.List) + return str +} + +// ExtAttrValue_Promise is a wrapper for a ExtAttrValue promised by a client call. 
+type ExtAttrValue_Promise struct{ *capnp.Pipeline } + +func (p ExtAttrValue_Promise) Struct() (ExtAttrValue, error) { + s, err := p.Pipeline.Struct() + return ExtAttrValue{s}, err +} + +func (p ExtAttrValue_Promise) Time() TimeSpec_Promise { + return TimeSpec_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +const schema_8a6e4e6e3e2db81e = "x\xdaL\xcfMk\x13]\x1c\x05\xf0s\xee\xccd2" + + "O\x13\x9a\x87{\x85nl\xabt\x11\xc5\xda\xd4V\xc4" + + "\"\xa6\x0aY\xb8\x11\xe3\x80\xfb\xbc\x8cP\x98\xcc\xc4\xe4" + + "\x86\x167\xee]\xf8\x05\\\xf9\x0dt!\xb8\x11\xf1;" + + "\xb8\xf4\x1b\xb8\x12\xc1\xfa\xfe\x97\xff\xf8\x82\xcbs\x7f\x9c" + + "s\xef\xedX\xee\x9b\xed\xe80\x00\xfa\xed\xa8&\xd9\xcb" + + "\xe3\xb7\xab\x1f\x9e>F\x7f\x8dFV_l^-n" + + "\x16\x0f\xd1c\xfc\x1f\xc3\x9d'\x1c\xd2>g\x0c\xec<" + + "\xe3%\x83\xb1\x14\xe58\xdb\x1a\x95\x93pR\x16[\xbd" + + "#\x7f\xcd\xfb\xd9\x9dA\xbe\xc8\xce\x8f\x06\xd3b\xba\xf7" + + "\xf7(\xce\x17\xd9-\xb2\xdf\x0e\xc2\x86HH\xc0&\xbc" + + "\x00\xa4!\x03\xa6-\x1a6\xf9C\x1c\x15\x9a\xdc\x03\xd2" + + "\xba\x82S0\xdf\xc5\xd1\x00\xf6\x7f\x9e\x05\xd2\x86\xc2\x8a" + + "B\xf0M\x1c\x03\xc0\x9e\xe0m u\x0a\xeb\x0a\xe1W" + + "q\x0c\x01{\x92\xa7\x81tEaC!\xfa\"\x8e\x11" + + "`O\xf1>\x90\xae+\x9cS\xa8}\x16\xc7\x1a`\xcf" + + "Twl(t\x14\xe2O\xe2\xf4\xc7v\xb3zU[" + + "aW\xa1~,\x8eu\xc0nWS\x1d\x85+\x0a\xc9" + + "GqL\x00{\x99\xd7\x81tWa\x9f\x86k\xf7\x0e" + + "\xcb\xd9\x98\x11\x0c#\xb0\xbb\xf8\x15\x13\x18&\xe0\xf20" + + "/\x87l\xc2\xb0\x09JQ\xfa\xde\xd1\xc1\xdc\x83s\xd4" + + "\xe2\xb9\x9f\xb1\x01\xc3\x06(U\xe9F1Bw\x96M" + + "\xb2\xc2\xff\x99[\xf6\x07\x93\x8c-y\xf3\xfa\xdd\xc5\xf7" + + "\x8f\xa6\xaf\x00\xb2\x05v\xc7w\xf3r\xe0\xb9\x04\xc3%" + + "P\xaa\xf4o\xfd7<\x18\x96e\x9e\x0d\x0a\x12\x86\x04" + + "\x7f\x06\x00\x00\xff\xff\xf4\x13pj" + +func init() { + schemas.Register(schema_8a6e4e6e3e2db81e, + 0x9bb0f31edcf7bd65) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/StringWrapper.capnp b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/StringWrapper.capnp new file mode 100644 index 00000000..d738558e --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/StringWrapper.capnp @@ -0,0 +1,16 @@ +@0xdf50359faf84cbef; +using Go = import "/go.capnp"; +$Go.package("node_common_capnp"); +$Go.import("github.com/v3io/v3io-go/internal/schemas/node/common"); + +using Java = import "/java/java.capnp"; +$Java.package("io.iguaz.v3io.daemon.client.api.capnp"); +$Java.outerClassname("StringWrapperOuter"); + +struct StringWrapper { + str @0 : Text; +} + +struct StringWrapperList{ + arr @0 : List(StringWrapper); +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/StringWrapper.capnp.go b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/StringWrapper.capnp.go new file mode 100644 index 00000000..9a02ec86 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/StringWrapper.capnp.go @@ -0,0 +1,179 @@ +// Code generated by capnpc-go. DO NOT EDIT. + +package node_common_capnp + +import ( + capnp "zombiezen.com/go/capnproto2" + text "zombiezen.com/go/capnproto2/encoding/text" + schemas "zombiezen.com/go/capnproto2/schemas" +) + +type StringWrapper struct{ capnp.Struct } + +// StringWrapper_TypeID is the unique identifier for the type StringWrapper. 
+const StringWrapper_TypeID = 0xf9282a9b6c819641 + +func NewStringWrapper(s *capnp.Segment) (StringWrapper, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return StringWrapper{st}, err +} + +func NewRootStringWrapper(s *capnp.Segment) (StringWrapper, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return StringWrapper{st}, err +} + +func ReadRootStringWrapper(msg *capnp.Message) (StringWrapper, error) { + root, err := msg.RootPtr() + return StringWrapper{root.Struct()}, err +} + +func (s StringWrapper) String() string { + str, _ := text.Marshal(0xf9282a9b6c819641, s.Struct) + return str +} + +func (s StringWrapper) Str() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s StringWrapper) HasStr() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s StringWrapper) StrBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s StringWrapper) SetStr(v string) error { + return s.Struct.SetText(0, v) +} + +// StringWrapper_List is a list of StringWrapper. +type StringWrapper_List struct{ capnp.List } + +// NewStringWrapper creates a new list of StringWrapper. +func NewStringWrapper_List(s *capnp.Segment, sz int32) (StringWrapper_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return StringWrapper_List{l}, err +} + +func (s StringWrapper_List) At(i int) StringWrapper { return StringWrapper{s.List.Struct(i)} } + +func (s StringWrapper_List) Set(i int, v StringWrapper) error { return s.List.SetStruct(i, v.Struct) } + +func (s StringWrapper_List) String() string { + str, _ := text.MarshalList(0xf9282a9b6c819641, s.List) + return str +} + +// StringWrapper_Promise is a wrapper for a StringWrapper promised by a client call. +type StringWrapper_Promise struct{ *capnp.Pipeline } + +func (p StringWrapper_Promise) Struct() (StringWrapper, error) { + s, err := p.Pipeline.Struct() + return StringWrapper{s}, err +} + +type StringWrapperList struct{ capnp.Struct } + +// StringWrapperList_TypeID is the unique identifier for the type StringWrapperList. +const StringWrapperList_TypeID = 0xecac44c78d0858b0 + +func NewStringWrapperList(s *capnp.Segment) (StringWrapperList, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return StringWrapperList{st}, err +} + +func NewRootStringWrapperList(s *capnp.Segment) (StringWrapperList, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return StringWrapperList{st}, err +} + +func ReadRootStringWrapperList(msg *capnp.Message) (StringWrapperList, error) { + root, err := msg.RootPtr() + return StringWrapperList{root.Struct()}, err +} + +func (s StringWrapperList) String() string { + str, _ := text.Marshal(0xecac44c78d0858b0, s.Struct) + return str +} + +func (s StringWrapperList) Arr() (StringWrapper_List, error) { + p, err := s.Struct.Ptr(0) + return StringWrapper_List{List: p.List()}, err +} + +func (s StringWrapperList) HasArr() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s StringWrapperList) SetArr(v StringWrapper_List) error { + return s.Struct.SetPtr(0, v.List.ToPtr()) +} + +// NewArr sets the arr field to a newly +// allocated StringWrapper_List, preferring placement in s's segment. 
+func (s StringWrapperList) NewArr(n int32) (StringWrapper_List, error) { + l, err := NewStringWrapper_List(s.Struct.Segment(), n) + if err != nil { + return StringWrapper_List{}, err + } + err = s.Struct.SetPtr(0, l.List.ToPtr()) + return l, err +} + +// StringWrapperList_List is a list of StringWrapperList. +type StringWrapperList_List struct{ capnp.List } + +// NewStringWrapperList creates a new list of StringWrapperList. +func NewStringWrapperList_List(s *capnp.Segment, sz int32) (StringWrapperList_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return StringWrapperList_List{l}, err +} + +func (s StringWrapperList_List) At(i int) StringWrapperList { + return StringWrapperList{s.List.Struct(i)} +} + +func (s StringWrapperList_List) Set(i int, v StringWrapperList) error { + return s.List.SetStruct(i, v.Struct) +} + +func (s StringWrapperList_List) String() string { + str, _ := text.MarshalList(0xecac44c78d0858b0, s.List) + return str +} + +// StringWrapperList_Promise is a wrapper for a StringWrapperList promised by a client call. +type StringWrapperList_Promise struct{ *capnp.Pipeline } + +func (p StringWrapperList_Promise) Struct() (StringWrapperList, error) { + s, err := p.Pipeline.Struct() + return StringWrapperList{s}, err +} + +const schema_df50359faf84cbef = "x\xda\x12\x98\xe5\xc0d\xc8*\xcf\xcc\xc0\x10\xa8\xc2\xca" + + "\xf6\x7fC\x04G\xefq\x975o\x18\x04\x15\x18\xff\xbf" + + "?\xdd\xb2~\xbei\xc0}\x06VFv\x06\x06\xe3R" + + "\xc6I\x8c\xc2\xbd \xa6p'\xa3=\x03\xe3\x7f\xc7i" + + "\x8d9\xb3\xb54~bS\xbc\x93\xb1\x88Q\xf8,X" + + "\xf1IF{\x06\xbf\xffy\xf9)\xa9\xfa\xc9\xf9\xb9\xac" + + "\xb9\xf9y\xfa\xc1%E\x99y\xe9\xe1E\x89\x05\x05\xa9" + + "Ez\xc9\x89\x05y\x05V(b>\x99\xc5\x8c%\x01" + + "\x8c\x8c\x81,\xcc,\x0c\x0c,\x8c\x0c\x0c\x82\xbcJ\x0c" + + "\x0c\x81\x1c\xcc\x8c\x81*L\x8c\xec\x89EE\x8c|\x0c" + + "\x8c\x01\xcc\x8c\x8c\x02\x08w00\x82\x04\xe1V\xb1\x10" + + "\xb2J\x1e,\x88\xc3\x1a\x11&F\xf6\xe2\x92\"F\x1e" + + "\x06&F\x1e\x06F@\x00\x00\x00\xff\xff\x1cBQ%" + +func init() { + schemas.Register(schema_df50359faf84cbef, + 0xecac44c78d0858b0, + 0xf9282a9b6c819641) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/TimeSpec.capnp b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/TimeSpec.capnp new file mode 100644 index 00000000..f52d481a --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/TimeSpec.capnp @@ -0,0 +1,15 @@ +@0xbcbc7bd29390d6e8; +using Go = import "/go.capnp"; +$Go.package("node_common_capnp"); +$Go.import("github.com/v3io/v3io-go/internal/schemas/node/common"); + +# Imports & Namespace settings +using Java = import "/java/java.capnp"; +$Java.package("io.iguaz.v3io.daemon.client.api.capnp"); +$Java.outerClassname("V3ioTimeSpec"); + +struct TimeSpec { + tvSec @0 : Int64; + tvNsec @1 : Int64; +} + diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/TimeSpec.capnp.go b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/TimeSpec.capnp.go new file mode 100644 index 00000000..18498f1b --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/TimeSpec.capnp.go @@ -0,0 +1,93 @@ +// Code generated by capnpc-go. DO NOT EDIT. 
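VnObjectAttributeKeyMap further down in this diff carries its attribute names as a StringWrapperList, so the wrapper types above are typically filled in bulk: allocate the composite list once with NewArr, then set each element in place. A brief sketch under the same assumptions as the previous one (capnproto2 message allocation, import path mirroring the vendored directory); buildKeyNames is a hypothetical helper.

package example

import (
	capnp "zombiezen.com/go/capnproto2"

	node_common_capnp "github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common"
)

func buildKeyNames(names []string) (node_common_capnp.StringWrapperList, error) {
	var empty node_common_capnp.StringWrapperList

	_, seg, err := capnp.NewMessage(capnp.SingleSegment(nil))
	if err != nil {
		return empty, err
	}

	list, err := node_common_capnp.NewRootStringWrapperList(seg)
	if err != nil {
		return empty, err
	}

	// NewArr allocates the flat composite list in one shot
	arr, err := list.NewArr(int32(len(names)))
	if err != nil {
		return empty, err
	}

	// elements already exist after allocation; they are only mutated in place
	for i, name := range names {
		if err := arr.At(i).SetStr(name); err != nil {
			return empty, err
		}
	}

	return list, nil
}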
+ +package node_common_capnp + +import ( + capnp "zombiezen.com/go/capnproto2" + text "zombiezen.com/go/capnproto2/encoding/text" + schemas "zombiezen.com/go/capnproto2/schemas" +) + +type TimeSpec struct{ capnp.Struct } + +// TimeSpec_TypeID is the unique identifier for the type TimeSpec. +const TimeSpec_TypeID = 0xbf708ef135ebc1d4 + +func NewTimeSpec(s *capnp.Segment) (TimeSpec, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 0}) + return TimeSpec{st}, err +} + +func NewRootTimeSpec(s *capnp.Segment) (TimeSpec, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 0}) + return TimeSpec{st}, err +} + +func ReadRootTimeSpec(msg *capnp.Message) (TimeSpec, error) { + root, err := msg.RootPtr() + return TimeSpec{root.Struct()}, err +} + +func (s TimeSpec) String() string { + str, _ := text.Marshal(0xbf708ef135ebc1d4, s.Struct) + return str +} + +func (s TimeSpec) TvSec() int64 { + return int64(s.Struct.Uint64(0)) +} + +func (s TimeSpec) SetTvSec(v int64) { + s.Struct.SetUint64(0, uint64(v)) +} + +func (s TimeSpec) TvNsec() int64 { + return int64(s.Struct.Uint64(8)) +} + +func (s TimeSpec) SetTvNsec(v int64) { + s.Struct.SetUint64(8, uint64(v)) +} + +// TimeSpec_List is a list of TimeSpec. +type TimeSpec_List struct{ capnp.List } + +// NewTimeSpec creates a new list of TimeSpec. +func NewTimeSpec_List(s *capnp.Segment, sz int32) (TimeSpec_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 16, PointerCount: 0}, sz) + return TimeSpec_List{l}, err +} + +func (s TimeSpec_List) At(i int) TimeSpec { return TimeSpec{s.List.Struct(i)} } + +func (s TimeSpec_List) Set(i int, v TimeSpec) error { return s.List.SetStruct(i, v.Struct) } + +func (s TimeSpec_List) String() string { + str, _ := text.MarshalList(0xbf708ef135ebc1d4, s.List) + return str +} + +// TimeSpec_Promise is a wrapper for a TimeSpec promised by a client call. +type TimeSpec_Promise struct{ *capnp.Pipeline } + +func (p TimeSpec_Promise) Struct() (TimeSpec, error) { + s, err := p.Pipeline.Struct() + return TimeSpec{s}, err +} + +const schema_bcbc7bd29390d6e8 = "x\xda\x12ht`2d=\xce\xc4\xc0\x10\xa8\xc0\xca" + + "\xf6\xff\xca\xc1\xd7\xa6\x1f\xfb\x0a\xf63\x04J32\xfd" + + "\x7fqm\xc2\xe4K\xd5{\xf60\xb0\xb030\x18{" + + "2*1\x0aG2\xb230\x08\x872\x963\xf8\xfd" + + "\xcf\xcbOI\xd5O\xce\xcfe\xce\xcd\xcf\xd3\x0f\xc9\xcc" + + "M\x0d.HM\xd6KN,\xc8+\xb0\x02s\xd9\x0b" + + "R\x93\x03\x18\x19\x039\x98Y\x18\x18X\x18\x19\x18\x04" + + "5\x8d\x18\x18\x02U\x98\x19\x03\x0d\x98\x18\x05\x19\x19E" + + "\x18A\x82\xbaV\x0c\x0c\x81\x1a\xcc\x8c\x81&L\x8c\xf2" + + "%e\xc1\xa9\xc9\x8c\xac\x0cL\x8c\xac\x0c\x8c\xf6%e" + + "~\xc5\x08. 
\x00\x00\xff\xff\xe7k)\x10" + +func init() { + schemas.Register(schema_bcbc7bd29390d6e8, + 0xbf708ef135ebc1d4) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectAttributeKeyMap.capnp b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectAttributeKeyMap.capnp new file mode 100644 index 00000000..158ab822 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectAttributeKeyMap.capnp @@ -0,0 +1,13 @@ +@0x986bf57944c8b89f; +using Go = import "/go.capnp"; +$Go.package("node_common_capnp"); +$Go.import("github.com/v3io/v3io-go/internal/schemas/node/common"); + +using Java = import "/java/java.capnp"; +using import "/node/common/StringWrapper.capnp".StringWrapperList; +$Java.package("io.iguaz.v3io.daemon.client.api.capnp"); +$Java.outerClassname("VnObjectAttributeKeyMapOuter"); + +struct VnObjectAttributeKeyMap { + names @0 : StringWrapperList; +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectAttributeKeyMap.capnp.go b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectAttributeKeyMap.capnp.go new file mode 100644 index 00000000..9f64caf5 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectAttributeKeyMap.capnp.go @@ -0,0 +1,110 @@ +// Code generated by capnpc-go. DO NOT EDIT. + +package node_common_capnp + +import ( + capnp "zombiezen.com/go/capnproto2" + text "zombiezen.com/go/capnproto2/encoding/text" + schemas "zombiezen.com/go/capnproto2/schemas" +) + +type VnObjectAttributeKeyMap struct{ capnp.Struct } + +// VnObjectAttributeKeyMap_TypeID is the unique identifier for the type VnObjectAttributeKeyMap. +const VnObjectAttributeKeyMap_TypeID = 0x8f34d3b1223bf828 + +func NewVnObjectAttributeKeyMap(s *capnp.Segment) (VnObjectAttributeKeyMap, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return VnObjectAttributeKeyMap{st}, err +} + +func NewRootVnObjectAttributeKeyMap(s *capnp.Segment) (VnObjectAttributeKeyMap, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return VnObjectAttributeKeyMap{st}, err +} + +func ReadRootVnObjectAttributeKeyMap(msg *capnp.Message) (VnObjectAttributeKeyMap, error) { + root, err := msg.RootPtr() + return VnObjectAttributeKeyMap{root.Struct()}, err +} + +func (s VnObjectAttributeKeyMap) String() string { + str, _ := text.Marshal(0x8f34d3b1223bf828, s.Struct) + return str +} + +func (s VnObjectAttributeKeyMap) Names() (StringWrapperList, error) { + p, err := s.Struct.Ptr(0) + return StringWrapperList{Struct: p.Struct()}, err +} + +func (s VnObjectAttributeKeyMap) HasNames() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s VnObjectAttributeKeyMap) SetNames(v StringWrapperList) error { + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewNames sets the names field to a newly +// allocated StringWrapperList struct, preferring placement in s's segment. +func (s VnObjectAttributeKeyMap) NewNames() (StringWrapperList, error) { + ss, err := NewStringWrapperList(s.Struct.Segment()) + if err != nil { + return StringWrapperList{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +// VnObjectAttributeKeyMap_List is a list of VnObjectAttributeKeyMap. 
+type VnObjectAttributeKeyMap_List struct{ capnp.List } + +// NewVnObjectAttributeKeyMap creates a new list of VnObjectAttributeKeyMap. +func NewVnObjectAttributeKeyMap_List(s *capnp.Segment, sz int32) (VnObjectAttributeKeyMap_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return VnObjectAttributeKeyMap_List{l}, err +} + +func (s VnObjectAttributeKeyMap_List) At(i int) VnObjectAttributeKeyMap { + return VnObjectAttributeKeyMap{s.List.Struct(i)} +} + +func (s VnObjectAttributeKeyMap_List) Set(i int, v VnObjectAttributeKeyMap) error { + return s.List.SetStruct(i, v.Struct) +} + +func (s VnObjectAttributeKeyMap_List) String() string { + str, _ := text.MarshalList(0x8f34d3b1223bf828, s.List) + return str +} + +// VnObjectAttributeKeyMap_Promise is a wrapper for a VnObjectAttributeKeyMap promised by a client call. +type VnObjectAttributeKeyMap_Promise struct{ *capnp.Pipeline } + +func (p VnObjectAttributeKeyMap_Promise) Struct() (VnObjectAttributeKeyMap, error) { + s, err := p.Pipeline.Struct() + return VnObjectAttributeKeyMap{s}, err +} + +func (p VnObjectAttributeKeyMap_Promise) Names() StringWrapperList_Promise { + return StringWrapperList_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +const schema_986bf57944c8b89f = "x\xda\x12\x98\xee\xc0d\xc8z\x9e\x99\x81!P\x87\x95" + + "\xed\xbf\xc6\x0fk\xa5\x8d\x97M\xfa\x19\x04\xb5\x18\xff\xcf" + + "\xdfq\xc2\xa5\xf2k\xf6\x0c\x06VFv\x06\x06\xe3\xb3" + + "\x8cBL\xc2oAL\xe1\x97\x8c\xf6\x0c\xf9\xff\xf3\xf2" + + "SR\xf5\x93\xf3s\xd9s\xf3\xf3\xf4\xc3\xf2\xfc\x93\xb2" + + "R\x93K\x1cKJ\x8a2\x93JKR\xbdS+}" + + "\x13\x0b\xf4\x92\x13\x0b\xf2\x0a\xac\xb0\xcb2\x16\x0402" + + "\x06\xb20\xb300\xb0020\x08\xf2\x1a10\x04" + + "r03\x06\x8a01\xca\xe7%\xe6\xa6\x163\x0a\xfc" + + "\xdf\x10\xc1\xd1{\xdce\xcd\x1b\x06\x06FF\x01\x06F" + + "@\x00\x00\x00\xff\xffo#4\xb9" + +func init() { + schemas.Register(schema_986bf57944c8b89f, + 0x8f34d3b1223bf828) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectAttributeValueMap.capnp b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectAttributeValueMap.capnp new file mode 100644 index 00000000..6741a9e6 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectAttributeValueMap.capnp @@ -0,0 +1,17 @@ +@0x90687959836864ab; +using Go = import "/go.capnp"; +$Go.package("node_common_capnp"); +$Go.import("github.com/v3io/v3io-go/internal/schemas/node/common"); + +using Java = import "/java/java.capnp"; +using import "/node/common/ExtAttrValue.capnp".ExtAttrValue; +$Java.package("io.iguaz.v3io.daemon.client.api.capnp"); +$Java.outerClassname("VnObjectAttributeValueMapOuter"); + +struct VnObjectAttributeValuePtr { + value @0 : ExtAttrValue; +} + +struct VnObjectAttributeValueMap { + values @0 : List(VnObjectAttributeValuePtr); +} \ No newline at end of file diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectAttributeValueMap.capnp.go b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectAttributeValueMap.capnp.go new file mode 100644 index 00000000..417e6095 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectAttributeValueMap.capnp.go @@ -0,0 +1,195 @@ +// Code generated by capnpc-go. DO NOT EDIT. 
+ +package node_common_capnp + +import ( + capnp "zombiezen.com/go/capnproto2" + text "zombiezen.com/go/capnproto2/encoding/text" + schemas "zombiezen.com/go/capnproto2/schemas" +) + +type VnObjectAttributeValuePtr struct{ capnp.Struct } + +// VnObjectAttributeValuePtr_TypeID is the unique identifier for the type VnObjectAttributeValuePtr. +const VnObjectAttributeValuePtr_TypeID = 0xd5b5408ea84873aa + +func NewVnObjectAttributeValuePtr(s *capnp.Segment) (VnObjectAttributeValuePtr, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return VnObjectAttributeValuePtr{st}, err +} + +func NewRootVnObjectAttributeValuePtr(s *capnp.Segment) (VnObjectAttributeValuePtr, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return VnObjectAttributeValuePtr{st}, err +} + +func ReadRootVnObjectAttributeValuePtr(msg *capnp.Message) (VnObjectAttributeValuePtr, error) { + root, err := msg.RootPtr() + return VnObjectAttributeValuePtr{root.Struct()}, err +} + +func (s VnObjectAttributeValuePtr) String() string { + str, _ := text.Marshal(0xd5b5408ea84873aa, s.Struct) + return str +} + +func (s VnObjectAttributeValuePtr) Value() (ExtAttrValue, error) { + p, err := s.Struct.Ptr(0) + return ExtAttrValue{Struct: p.Struct()}, err +} + +func (s VnObjectAttributeValuePtr) HasValue() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s VnObjectAttributeValuePtr) SetValue(v ExtAttrValue) error { + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewValue sets the value field to a newly +// allocated ExtAttrValue struct, preferring placement in s's segment. +func (s VnObjectAttributeValuePtr) NewValue() (ExtAttrValue, error) { + ss, err := NewExtAttrValue(s.Struct.Segment()) + if err != nil { + return ExtAttrValue{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +// VnObjectAttributeValuePtr_List is a list of VnObjectAttributeValuePtr. +type VnObjectAttributeValuePtr_List struct{ capnp.List } + +// NewVnObjectAttributeValuePtr creates a new list of VnObjectAttributeValuePtr. +func NewVnObjectAttributeValuePtr_List(s *capnp.Segment, sz int32) (VnObjectAttributeValuePtr_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return VnObjectAttributeValuePtr_List{l}, err +} + +func (s VnObjectAttributeValuePtr_List) At(i int) VnObjectAttributeValuePtr { + return VnObjectAttributeValuePtr{s.List.Struct(i)} +} + +func (s VnObjectAttributeValuePtr_List) Set(i int, v VnObjectAttributeValuePtr) error { + return s.List.SetStruct(i, v.Struct) +} + +func (s VnObjectAttributeValuePtr_List) String() string { + str, _ := text.MarshalList(0xd5b5408ea84873aa, s.List) + return str +} + +// VnObjectAttributeValuePtr_Promise is a wrapper for a VnObjectAttributeValuePtr promised by a client call. +type VnObjectAttributeValuePtr_Promise struct{ *capnp.Pipeline } + +func (p VnObjectAttributeValuePtr_Promise) Struct() (VnObjectAttributeValuePtr, error) { + s, err := p.Pipeline.Struct() + return VnObjectAttributeValuePtr{s}, err +} + +func (p VnObjectAttributeValuePtr_Promise) Value() ExtAttrValue_Promise { + return ExtAttrValue_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +type VnObjectAttributeValueMap struct{ capnp.Struct } + +// VnObjectAttributeValueMap_TypeID is the unique identifier for the type VnObjectAttributeValueMap. 
+const VnObjectAttributeValueMap_TypeID = 0xf1e4da8ee5d92bf9 + +func NewVnObjectAttributeValueMap(s *capnp.Segment) (VnObjectAttributeValueMap, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return VnObjectAttributeValueMap{st}, err +} + +func NewRootVnObjectAttributeValueMap(s *capnp.Segment) (VnObjectAttributeValueMap, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return VnObjectAttributeValueMap{st}, err +} + +func ReadRootVnObjectAttributeValueMap(msg *capnp.Message) (VnObjectAttributeValueMap, error) { + root, err := msg.RootPtr() + return VnObjectAttributeValueMap{root.Struct()}, err +} + +func (s VnObjectAttributeValueMap) String() string { + str, _ := text.Marshal(0xf1e4da8ee5d92bf9, s.Struct) + return str +} + +func (s VnObjectAttributeValueMap) Values() (VnObjectAttributeValuePtr_List, error) { + p, err := s.Struct.Ptr(0) + return VnObjectAttributeValuePtr_List{List: p.List()}, err +} + +func (s VnObjectAttributeValueMap) HasValues() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s VnObjectAttributeValueMap) SetValues(v VnObjectAttributeValuePtr_List) error { + return s.Struct.SetPtr(0, v.List.ToPtr()) +} + +// NewValues sets the values field to a newly +// allocated VnObjectAttributeValuePtr_List, preferring placement in s's segment. +func (s VnObjectAttributeValueMap) NewValues(n int32) (VnObjectAttributeValuePtr_List, error) { + l, err := NewVnObjectAttributeValuePtr_List(s.Struct.Segment(), n) + if err != nil { + return VnObjectAttributeValuePtr_List{}, err + } + err = s.Struct.SetPtr(0, l.List.ToPtr()) + return l, err +} + +// VnObjectAttributeValueMap_List is a list of VnObjectAttributeValueMap. +type VnObjectAttributeValueMap_List struct{ capnp.List } + +// NewVnObjectAttributeValueMap creates a new list of VnObjectAttributeValueMap. +func NewVnObjectAttributeValueMap_List(s *capnp.Segment, sz int32) (VnObjectAttributeValueMap_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return VnObjectAttributeValueMap_List{l}, err +} + +func (s VnObjectAttributeValueMap_List) At(i int) VnObjectAttributeValueMap { + return VnObjectAttributeValueMap{s.List.Struct(i)} +} + +func (s VnObjectAttributeValueMap_List) Set(i int, v VnObjectAttributeValueMap) error { + return s.List.SetStruct(i, v.Struct) +} + +func (s VnObjectAttributeValueMap_List) String() string { + str, _ := text.MarshalList(0xf1e4da8ee5d92bf9, s.List) + return str +} + +// VnObjectAttributeValueMap_Promise is a wrapper for a VnObjectAttributeValueMap promised by a client call. 
+type VnObjectAttributeValueMap_Promise struct{ *capnp.Pipeline } + +func (p VnObjectAttributeValueMap_Promise) Struct() (VnObjectAttributeValueMap, error) { + s, err := p.Pipeline.Struct() + return VnObjectAttributeValueMap{s}, err +} + +const schema_90687959836864ab = "x\xda\x128\xed\xc0d\xc8Z\xcf\xc2\xc0\x10h\xc2\xca" + + "\xf6\x7fU\xb1\xc7\x8a>\x87\xadW\x19\x04u\x18\xff\xaf" + + "N\xc9h\x8e\xac\xcc\x98\xc0\xc0\xca\xc8\xce\xc0`\xac\xca" + + "d\xc4$\xec\xca\xc4\xce\xc0 \xec\xc8d\xcf\xc0\xf8\xff" + + "\xa7\xf6\xcd\xa7}\xb7\x9e|\xc4\xa68\x11\xa4\xb8\x11\xac" + + "\xb8\x96\xc9\x9e\xa1\xea\x7f^~J\xaa~r~.{" + + "n~\x9e~X\x9e\x7fRVjr\x89cIIQ" + + "fRiIjXbNi\xaaob\x81^rb" + + "A^\x81\x156y\xf9\xd2\xd4\x80\x92\xa2\x00F\xc6@" + + "\x16f\x16\x06\x06\x16F\x06\x06A^#\x06\x86@\x0e" + + "f\xc6@\x11&F\xf92\x90\x19\x8c\x02\xffS\xf7~" + + "\xbf#\xf7y\xc3l\x06\x06FF\x01\x06F\xaa\xd8\xec" + + "\x9bX\x80f\xb3\x15\xd4f\x15&F{\xb0\xcd\xc5\x8c" + + "|\x0c\x8c\x01\xcc\x8c\x8c\x02\x88Pd`\x04\x09\x02\x02" + + "\x00\x00\xff\xffS\x0fje" + +func init() { + schemas.Register(schema_90687959836864ab, + 0xd5b5408ea84873aa, + 0xf1e4da8ee5d92bf9) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectItemsGetResponse.capnp b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectItemsGetResponse.capnp new file mode 100644 index 00000000..d3e7e757 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectItemsGetResponse.capnp @@ -0,0 +1,54 @@ +@0xdfe00955984fcb17; +using Go = import "/go.capnp"; +$Go.package("node_common_capnp"); +$Go.import("github.com/v3io/v3io-go/internal/schemas/node/common"); + + +# Imports & Namespace settings +using Java = import "/java/java.capnp"; +$Java.package("io.iguaz.v3io.daemon.client.api.capnp"); +$Java.outerClassname("VnObjectItemsGetResponse"); + +using import "/node/common/VnObjectItemsScanCookie.capnp".VnObjectItemsScanCookie; +using import "/node/common/VnObjectAttributeKeyMap.capnp".VnObjectAttributeKeyMap; +using import "/node/common/VnObjectAttributeValueMap.capnp".VnObjectAttributeValueMap; + +struct VnObjectItemsGetResponseHeader{ + marker @0 : Text; + scanState @1 : VnObjectItemsScanCookie; + hasMore @2 : Bool; + numItems @3 : UInt64; + numKeys @4 : UInt64; + numValues @5 : UInt64; +} + +struct VnObjectItemsGetMappedKeyValuePair { + keyMapIndex @0 :UInt64; + valueMapIndex @1 :UInt64; +} + +struct VnObjectItemsGetItem{ + name @0 :Text; + attrs @1 :List(VnObjectItemsGetMappedKeyValuePair); +} + +# Wrapper so that we can create orphan VnObjectItemsGetItem objects and then fill out a list of pointers +# to them. See https://capnproto.org/faq.html under "How do I resize a list?" (28/08/2016): +# "Keep in mind that you can use orphans to allocate sub-objects before you have a place to put them. But, also +# note that you cannot allocate elements of a struct list as orphans and then put them together as a list later, +# because struct lists are encoded as a flat array of struct values, not an array of pointers to struct values. +# You can, however, allocate any inner objects embedded within those structs as orphans." 
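The comment above is the rationale for the extra VnObjectItemsGetItemPtr indirection declared next: a struct list is a flat array, so its elements cannot be built as standalone objects and spliced in later, whereas a list of single-pointer structs can simply point at items allocated elsewhere in the message. A rough sketch of that pattern using the generated Go accessors that appear further down in this diff; the segment plumbing and the item names are assumed for illustration.

package example

import (
	capnp "zombiezen.com/go/capnproto2"

	node_common_capnp "github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common"
)

func buildItemPtrList(seg *capnp.Segment, names []string) (node_common_capnp.VnObjectItemsGetItemPtr_List, error) {
	var empty node_common_capnp.VnObjectItemsGetItemPtr_List

	// items can be allocated one by one, before the final list exists
	items := make([]node_common_capnp.VnObjectItemsGetItem, 0, len(names))
	for _, name := range names {
		item, err := node_common_capnp.NewVnObjectItemsGetItem(seg)
		if err != nil {
			return empty, err
		}
		if err := item.SetName(name); err != nil {
			return empty, err
		}
		items = append(items, item)
	}

	// the pointer list is sized after the fact and merely points at the items
	ptrs, err := node_common_capnp.NewVnObjectItemsGetItemPtr_List(seg, int32(len(items)))
	if err != nil {
		return empty, err
	}
	for i, item := range items {
		if err := ptrs.At(i).SetItem(item); err != nil {
			return empty, err
		}
	}

	return ptrs, nil
}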
+ +struct VnObjectItemsGetItemPtr{ + item @0: VnObjectItemsGetItem; +} + +struct VnObjectItemsGetResponseDataPayload{ + valueMap @0 :VnObjectAttributeValueMap; +} + +struct VnObjectItemsGetResponseMetadataPayload{ + valueMap @0 :VnObjectAttributeValueMap; + keyMap @1 :VnObjectAttributeKeyMap; + items @2 :List(VnObjectItemsGetItemPtr); +} \ No newline at end of file diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectItemsGetResponse.capnp.go b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectItemsGetResponse.capnp.go new file mode 100644 index 00000000..66ead630 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectItemsGetResponse.capnp.go @@ -0,0 +1,678 @@ +// Code generated by capnpc-go. DO NOT EDIT. + +package node_common_capnp + +import ( + capnp "zombiezen.com/go/capnproto2" + text "zombiezen.com/go/capnproto2/encoding/text" + schemas "zombiezen.com/go/capnproto2/schemas" +) + +type VnObjectItemsGetResponseHeader struct{ capnp.Struct } + +// VnObjectItemsGetResponseHeader_TypeID is the unique identifier for the type VnObjectItemsGetResponseHeader. +const VnObjectItemsGetResponseHeader_TypeID = 0x85032dcfe77493da + +func NewVnObjectItemsGetResponseHeader(s *capnp.Segment) (VnObjectItemsGetResponseHeader, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 32, PointerCount: 2}) + return VnObjectItemsGetResponseHeader{st}, err +} + +func NewRootVnObjectItemsGetResponseHeader(s *capnp.Segment) (VnObjectItemsGetResponseHeader, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 32, PointerCount: 2}) + return VnObjectItemsGetResponseHeader{st}, err +} + +func ReadRootVnObjectItemsGetResponseHeader(msg *capnp.Message) (VnObjectItemsGetResponseHeader, error) { + root, err := msg.RootPtr() + return VnObjectItemsGetResponseHeader{root.Struct()}, err +} + +func (s VnObjectItemsGetResponseHeader) String() string { + str, _ := text.Marshal(0x85032dcfe77493da, s.Struct) + return str +} + +func (s VnObjectItemsGetResponseHeader) Marker() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s VnObjectItemsGetResponseHeader) HasMarker() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s VnObjectItemsGetResponseHeader) MarkerBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s VnObjectItemsGetResponseHeader) SetMarker(v string) error { + return s.Struct.SetText(0, v) +} + +func (s VnObjectItemsGetResponseHeader) ScanState() (VnObjectItemsScanCookie, error) { + p, err := s.Struct.Ptr(1) + return VnObjectItemsScanCookie{Struct: p.Struct()}, err +} + +func (s VnObjectItemsGetResponseHeader) HasScanState() bool { + p, err := s.Struct.Ptr(1) + return p.IsValid() || err != nil +} + +func (s VnObjectItemsGetResponseHeader) SetScanState(v VnObjectItemsScanCookie) error { + return s.Struct.SetPtr(1, v.Struct.ToPtr()) +} + +// NewScanState sets the scanState field to a newly +// allocated VnObjectItemsScanCookie struct, preferring placement in s's segment. 
+func (s VnObjectItemsGetResponseHeader) NewScanState() (VnObjectItemsScanCookie, error) { + ss, err := NewVnObjectItemsScanCookie(s.Struct.Segment()) + if err != nil { + return VnObjectItemsScanCookie{}, err + } + err = s.Struct.SetPtr(1, ss.Struct.ToPtr()) + return ss, err +} + +func (s VnObjectItemsGetResponseHeader) HasMore() bool { + return s.Struct.Bit(0) +} + +func (s VnObjectItemsGetResponseHeader) SetHasMore(v bool) { + s.Struct.SetBit(0, v) +} + +func (s VnObjectItemsGetResponseHeader) NumItems() uint64 { + return s.Struct.Uint64(8) +} + +func (s VnObjectItemsGetResponseHeader) SetNumItems(v uint64) { + s.Struct.SetUint64(8, v) +} + +func (s VnObjectItemsGetResponseHeader) NumKeys() uint64 { + return s.Struct.Uint64(16) +} + +func (s VnObjectItemsGetResponseHeader) SetNumKeys(v uint64) { + s.Struct.SetUint64(16, v) +} + +func (s VnObjectItemsGetResponseHeader) NumValues() uint64 { + return s.Struct.Uint64(24) +} + +func (s VnObjectItemsGetResponseHeader) SetNumValues(v uint64) { + s.Struct.SetUint64(24, v) +} + +// VnObjectItemsGetResponseHeader_List is a list of VnObjectItemsGetResponseHeader. +type VnObjectItemsGetResponseHeader_List struct{ capnp.List } + +// NewVnObjectItemsGetResponseHeader creates a new list of VnObjectItemsGetResponseHeader. +func NewVnObjectItemsGetResponseHeader_List(s *capnp.Segment, sz int32) (VnObjectItemsGetResponseHeader_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 32, PointerCount: 2}, sz) + return VnObjectItemsGetResponseHeader_List{l}, err +} + +func (s VnObjectItemsGetResponseHeader_List) At(i int) VnObjectItemsGetResponseHeader { + return VnObjectItemsGetResponseHeader{s.List.Struct(i)} +} + +func (s VnObjectItemsGetResponseHeader_List) Set(i int, v VnObjectItemsGetResponseHeader) error { + return s.List.SetStruct(i, v.Struct) +} + +func (s VnObjectItemsGetResponseHeader_List) String() string { + str, _ := text.MarshalList(0x85032dcfe77493da, s.List) + return str +} + +// VnObjectItemsGetResponseHeader_Promise is a wrapper for a VnObjectItemsGetResponseHeader promised by a client call. +type VnObjectItemsGetResponseHeader_Promise struct{ *capnp.Pipeline } + +func (p VnObjectItemsGetResponseHeader_Promise) Struct() (VnObjectItemsGetResponseHeader, error) { + s, err := p.Pipeline.Struct() + return VnObjectItemsGetResponseHeader{s}, err +} + +func (p VnObjectItemsGetResponseHeader_Promise) ScanState() VnObjectItemsScanCookie_Promise { + return VnObjectItemsScanCookie_Promise{Pipeline: p.Pipeline.GetPipeline(1)} +} + +type VnObjectItemsGetMappedKeyValuePair struct{ capnp.Struct } + +// VnObjectItemsGetMappedKeyValuePair_TypeID is the unique identifier for the type VnObjectItemsGetMappedKeyValuePair. 
+const VnObjectItemsGetMappedKeyValuePair_TypeID = 0xd26f89592f5fc95f + +func NewVnObjectItemsGetMappedKeyValuePair(s *capnp.Segment) (VnObjectItemsGetMappedKeyValuePair, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 0}) + return VnObjectItemsGetMappedKeyValuePair{st}, err +} + +func NewRootVnObjectItemsGetMappedKeyValuePair(s *capnp.Segment) (VnObjectItemsGetMappedKeyValuePair, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 0}) + return VnObjectItemsGetMappedKeyValuePair{st}, err +} + +func ReadRootVnObjectItemsGetMappedKeyValuePair(msg *capnp.Message) (VnObjectItemsGetMappedKeyValuePair, error) { + root, err := msg.RootPtr() + return VnObjectItemsGetMappedKeyValuePair{root.Struct()}, err +} + +func (s VnObjectItemsGetMappedKeyValuePair) String() string { + str, _ := text.Marshal(0xd26f89592f5fc95f, s.Struct) + return str +} + +func (s VnObjectItemsGetMappedKeyValuePair) KeyMapIndex() uint64 { + return s.Struct.Uint64(0) +} + +func (s VnObjectItemsGetMappedKeyValuePair) SetKeyMapIndex(v uint64) { + s.Struct.SetUint64(0, v) +} + +func (s VnObjectItemsGetMappedKeyValuePair) ValueMapIndex() uint64 { + return s.Struct.Uint64(8) +} + +func (s VnObjectItemsGetMappedKeyValuePair) SetValueMapIndex(v uint64) { + s.Struct.SetUint64(8, v) +} + +// VnObjectItemsGetMappedKeyValuePair_List is a list of VnObjectItemsGetMappedKeyValuePair. +type VnObjectItemsGetMappedKeyValuePair_List struct{ capnp.List } + +// NewVnObjectItemsGetMappedKeyValuePair creates a new list of VnObjectItemsGetMappedKeyValuePair. +func NewVnObjectItemsGetMappedKeyValuePair_List(s *capnp.Segment, sz int32) (VnObjectItemsGetMappedKeyValuePair_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 16, PointerCount: 0}, sz) + return VnObjectItemsGetMappedKeyValuePair_List{l}, err +} + +func (s VnObjectItemsGetMappedKeyValuePair_List) At(i int) VnObjectItemsGetMappedKeyValuePair { + return VnObjectItemsGetMappedKeyValuePair{s.List.Struct(i)} +} + +func (s VnObjectItemsGetMappedKeyValuePair_List) Set(i int, v VnObjectItemsGetMappedKeyValuePair) error { + return s.List.SetStruct(i, v.Struct) +} + +func (s VnObjectItemsGetMappedKeyValuePair_List) String() string { + str, _ := text.MarshalList(0xd26f89592f5fc95f, s.List) + return str +} + +// VnObjectItemsGetMappedKeyValuePair_Promise is a wrapper for a VnObjectItemsGetMappedKeyValuePair promised by a client call. +type VnObjectItemsGetMappedKeyValuePair_Promise struct{ *capnp.Pipeline } + +func (p VnObjectItemsGetMappedKeyValuePair_Promise) Struct() (VnObjectItemsGetMappedKeyValuePair, error) { + s, err := p.Pipeline.Struct() + return VnObjectItemsGetMappedKeyValuePair{s}, err +} + +type VnObjectItemsGetItem struct{ capnp.Struct } + +// VnObjectItemsGetItem_TypeID is the unique identifier for the type VnObjectItemsGetItem. 
+const VnObjectItemsGetItem_TypeID = 0xeacf121c1705f7c7 + +func NewVnObjectItemsGetItem(s *capnp.Segment) (VnObjectItemsGetItem, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}) + return VnObjectItemsGetItem{st}, err +} + +func NewRootVnObjectItemsGetItem(s *capnp.Segment) (VnObjectItemsGetItem, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}) + return VnObjectItemsGetItem{st}, err +} + +func ReadRootVnObjectItemsGetItem(msg *capnp.Message) (VnObjectItemsGetItem, error) { + root, err := msg.RootPtr() + return VnObjectItemsGetItem{root.Struct()}, err +} + +func (s VnObjectItemsGetItem) String() string { + str, _ := text.Marshal(0xeacf121c1705f7c7, s.Struct) + return str +} + +func (s VnObjectItemsGetItem) Name() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s VnObjectItemsGetItem) HasName() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s VnObjectItemsGetItem) NameBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s VnObjectItemsGetItem) SetName(v string) error { + return s.Struct.SetText(0, v) +} + +func (s VnObjectItemsGetItem) Attrs() (VnObjectItemsGetMappedKeyValuePair_List, error) { + p, err := s.Struct.Ptr(1) + return VnObjectItemsGetMappedKeyValuePair_List{List: p.List()}, err +} + +func (s VnObjectItemsGetItem) HasAttrs() bool { + p, err := s.Struct.Ptr(1) + return p.IsValid() || err != nil +} + +func (s VnObjectItemsGetItem) SetAttrs(v VnObjectItemsGetMappedKeyValuePair_List) error { + return s.Struct.SetPtr(1, v.List.ToPtr()) +} + +// NewAttrs sets the attrs field to a newly +// allocated VnObjectItemsGetMappedKeyValuePair_List, preferring placement in s's segment. +func (s VnObjectItemsGetItem) NewAttrs(n int32) (VnObjectItemsGetMappedKeyValuePair_List, error) { + l, err := NewVnObjectItemsGetMappedKeyValuePair_List(s.Struct.Segment(), n) + if err != nil { + return VnObjectItemsGetMappedKeyValuePair_List{}, err + } + err = s.Struct.SetPtr(1, l.List.ToPtr()) + return l, err +} + +// VnObjectItemsGetItem_List is a list of VnObjectItemsGetItem. +type VnObjectItemsGetItem_List struct{ capnp.List } + +// NewVnObjectItemsGetItem creates a new list of VnObjectItemsGetItem. +func NewVnObjectItemsGetItem_List(s *capnp.Segment, sz int32) (VnObjectItemsGetItem_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}, sz) + return VnObjectItemsGetItem_List{l}, err +} + +func (s VnObjectItemsGetItem_List) At(i int) VnObjectItemsGetItem { + return VnObjectItemsGetItem{s.List.Struct(i)} +} + +func (s VnObjectItemsGetItem_List) Set(i int, v VnObjectItemsGetItem) error { + return s.List.SetStruct(i, v.Struct) +} + +func (s VnObjectItemsGetItem_List) String() string { + str, _ := text.MarshalList(0xeacf121c1705f7c7, s.List) + return str +} + +// VnObjectItemsGetItem_Promise is a wrapper for a VnObjectItemsGetItem promised by a client call. +type VnObjectItemsGetItem_Promise struct{ *capnp.Pipeline } + +func (p VnObjectItemsGetItem_Promise) Struct() (VnObjectItemsGetItem, error) { + s, err := p.Pipeline.Struct() + return VnObjectItemsGetItem{s}, err +} + +type VnObjectItemsGetItemPtr struct{ capnp.Struct } + +// VnObjectItemsGetItemPtr_TypeID is the unique identifier for the type VnObjectItemsGetItemPtr. 
+const VnObjectItemsGetItemPtr_TypeID = 0xf020cf2eadb0357c + +func NewVnObjectItemsGetItemPtr(s *capnp.Segment) (VnObjectItemsGetItemPtr, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return VnObjectItemsGetItemPtr{st}, err +} + +func NewRootVnObjectItemsGetItemPtr(s *capnp.Segment) (VnObjectItemsGetItemPtr, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return VnObjectItemsGetItemPtr{st}, err +} + +func ReadRootVnObjectItemsGetItemPtr(msg *capnp.Message) (VnObjectItemsGetItemPtr, error) { + root, err := msg.RootPtr() + return VnObjectItemsGetItemPtr{root.Struct()}, err +} + +func (s VnObjectItemsGetItemPtr) String() string { + str, _ := text.Marshal(0xf020cf2eadb0357c, s.Struct) + return str +} + +func (s VnObjectItemsGetItemPtr) Item() (VnObjectItemsGetItem, error) { + p, err := s.Struct.Ptr(0) + return VnObjectItemsGetItem{Struct: p.Struct()}, err +} + +func (s VnObjectItemsGetItemPtr) HasItem() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s VnObjectItemsGetItemPtr) SetItem(v VnObjectItemsGetItem) error { + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewItem sets the item field to a newly +// allocated VnObjectItemsGetItem struct, preferring placement in s's segment. +func (s VnObjectItemsGetItemPtr) NewItem() (VnObjectItemsGetItem, error) { + ss, err := NewVnObjectItemsGetItem(s.Struct.Segment()) + if err != nil { + return VnObjectItemsGetItem{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +// VnObjectItemsGetItemPtr_List is a list of VnObjectItemsGetItemPtr. +type VnObjectItemsGetItemPtr_List struct{ capnp.List } + +// NewVnObjectItemsGetItemPtr creates a new list of VnObjectItemsGetItemPtr. +func NewVnObjectItemsGetItemPtr_List(s *capnp.Segment, sz int32) (VnObjectItemsGetItemPtr_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return VnObjectItemsGetItemPtr_List{l}, err +} + +func (s VnObjectItemsGetItemPtr_List) At(i int) VnObjectItemsGetItemPtr { + return VnObjectItemsGetItemPtr{s.List.Struct(i)} +} + +func (s VnObjectItemsGetItemPtr_List) Set(i int, v VnObjectItemsGetItemPtr) error { + return s.List.SetStruct(i, v.Struct) +} + +func (s VnObjectItemsGetItemPtr_List) String() string { + str, _ := text.MarshalList(0xf020cf2eadb0357c, s.List) + return str +} + +// VnObjectItemsGetItemPtr_Promise is a wrapper for a VnObjectItemsGetItemPtr promised by a client call. +type VnObjectItemsGetItemPtr_Promise struct{ *capnp.Pipeline } + +func (p VnObjectItemsGetItemPtr_Promise) Struct() (VnObjectItemsGetItemPtr, error) { + s, err := p.Pipeline.Struct() + return VnObjectItemsGetItemPtr{s}, err +} + +func (p VnObjectItemsGetItemPtr_Promise) Item() VnObjectItemsGetItem_Promise { + return VnObjectItemsGetItem_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +type VnObjectItemsGetResponseDataPayload struct{ capnp.Struct } + +// VnObjectItemsGetResponseDataPayload_TypeID is the unique identifier for the type VnObjectItemsGetResponseDataPayload. 
+const VnObjectItemsGetResponseDataPayload_TypeID = 0xbb85e91da7f4c0a3 + +func NewVnObjectItemsGetResponseDataPayload(s *capnp.Segment) (VnObjectItemsGetResponseDataPayload, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return VnObjectItemsGetResponseDataPayload{st}, err +} + +func NewRootVnObjectItemsGetResponseDataPayload(s *capnp.Segment) (VnObjectItemsGetResponseDataPayload, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return VnObjectItemsGetResponseDataPayload{st}, err +} + +func ReadRootVnObjectItemsGetResponseDataPayload(msg *capnp.Message) (VnObjectItemsGetResponseDataPayload, error) { + root, err := msg.RootPtr() + return VnObjectItemsGetResponseDataPayload{root.Struct()}, err +} + +func (s VnObjectItemsGetResponseDataPayload) String() string { + str, _ := text.Marshal(0xbb85e91da7f4c0a3, s.Struct) + return str +} + +func (s VnObjectItemsGetResponseDataPayload) ValueMap() (VnObjectAttributeValueMap, error) { + p, err := s.Struct.Ptr(0) + return VnObjectAttributeValueMap{Struct: p.Struct()}, err +} + +func (s VnObjectItemsGetResponseDataPayload) HasValueMap() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s VnObjectItemsGetResponseDataPayload) SetValueMap(v VnObjectAttributeValueMap) error { + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewValueMap sets the valueMap field to a newly +// allocated VnObjectAttributeValueMap struct, preferring placement in s's segment. +func (s VnObjectItemsGetResponseDataPayload) NewValueMap() (VnObjectAttributeValueMap, error) { + ss, err := NewVnObjectAttributeValueMap(s.Struct.Segment()) + if err != nil { + return VnObjectAttributeValueMap{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +// VnObjectItemsGetResponseDataPayload_List is a list of VnObjectItemsGetResponseDataPayload. +type VnObjectItemsGetResponseDataPayload_List struct{ capnp.List } + +// NewVnObjectItemsGetResponseDataPayload creates a new list of VnObjectItemsGetResponseDataPayload. +func NewVnObjectItemsGetResponseDataPayload_List(s *capnp.Segment, sz int32) (VnObjectItemsGetResponseDataPayload_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return VnObjectItemsGetResponseDataPayload_List{l}, err +} + +func (s VnObjectItemsGetResponseDataPayload_List) At(i int) VnObjectItemsGetResponseDataPayload { + return VnObjectItemsGetResponseDataPayload{s.List.Struct(i)} +} + +func (s VnObjectItemsGetResponseDataPayload_List) Set(i int, v VnObjectItemsGetResponseDataPayload) error { + return s.List.SetStruct(i, v.Struct) +} + +func (s VnObjectItemsGetResponseDataPayload_List) String() string { + str, _ := text.MarshalList(0xbb85e91da7f4c0a3, s.List) + return str +} + +// VnObjectItemsGetResponseDataPayload_Promise is a wrapper for a VnObjectItemsGetResponseDataPayload promised by a client call. 
+type VnObjectItemsGetResponseDataPayload_Promise struct{ *capnp.Pipeline } + +func (p VnObjectItemsGetResponseDataPayload_Promise) Struct() (VnObjectItemsGetResponseDataPayload, error) { + s, err := p.Pipeline.Struct() + return VnObjectItemsGetResponseDataPayload{s}, err +} + +func (p VnObjectItemsGetResponseDataPayload_Promise) ValueMap() VnObjectAttributeValueMap_Promise { + return VnObjectAttributeValueMap_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +type VnObjectItemsGetResponseMetadataPayload struct{ capnp.Struct } + +// VnObjectItemsGetResponseMetadataPayload_TypeID is the unique identifier for the type VnObjectItemsGetResponseMetadataPayload. +const VnObjectItemsGetResponseMetadataPayload_TypeID = 0xb4008849dd7a3304 + +func NewVnObjectItemsGetResponseMetadataPayload(s *capnp.Segment) (VnObjectItemsGetResponseMetadataPayload, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 3}) + return VnObjectItemsGetResponseMetadataPayload{st}, err +} + +func NewRootVnObjectItemsGetResponseMetadataPayload(s *capnp.Segment) (VnObjectItemsGetResponseMetadataPayload, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 3}) + return VnObjectItemsGetResponseMetadataPayload{st}, err +} + +func ReadRootVnObjectItemsGetResponseMetadataPayload(msg *capnp.Message) (VnObjectItemsGetResponseMetadataPayload, error) { + root, err := msg.RootPtr() + return VnObjectItemsGetResponseMetadataPayload{root.Struct()}, err +} + +func (s VnObjectItemsGetResponseMetadataPayload) String() string { + str, _ := text.Marshal(0xb4008849dd7a3304, s.Struct) + return str +} + +func (s VnObjectItemsGetResponseMetadataPayload) ValueMap() (VnObjectAttributeValueMap, error) { + p, err := s.Struct.Ptr(0) + return VnObjectAttributeValueMap{Struct: p.Struct()}, err +} + +func (s VnObjectItemsGetResponseMetadataPayload) HasValueMap() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s VnObjectItemsGetResponseMetadataPayload) SetValueMap(v VnObjectAttributeValueMap) error { + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewValueMap sets the valueMap field to a newly +// allocated VnObjectAttributeValueMap struct, preferring placement in s's segment. +func (s VnObjectItemsGetResponseMetadataPayload) NewValueMap() (VnObjectAttributeValueMap, error) { + ss, err := NewVnObjectAttributeValueMap(s.Struct.Segment()) + if err != nil { + return VnObjectAttributeValueMap{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +func (s VnObjectItemsGetResponseMetadataPayload) KeyMap() (VnObjectAttributeKeyMap, error) { + p, err := s.Struct.Ptr(1) + return VnObjectAttributeKeyMap{Struct: p.Struct()}, err +} + +func (s VnObjectItemsGetResponseMetadataPayload) HasKeyMap() bool { + p, err := s.Struct.Ptr(1) + return p.IsValid() || err != nil +} + +func (s VnObjectItemsGetResponseMetadataPayload) SetKeyMap(v VnObjectAttributeKeyMap) error { + return s.Struct.SetPtr(1, v.Struct.ToPtr()) +} + +// NewKeyMap sets the keyMap field to a newly +// allocated VnObjectAttributeKeyMap struct, preferring placement in s's segment. 
+func (s VnObjectItemsGetResponseMetadataPayload) NewKeyMap() (VnObjectAttributeKeyMap, error) { + ss, err := NewVnObjectAttributeKeyMap(s.Struct.Segment()) + if err != nil { + return VnObjectAttributeKeyMap{}, err + } + err = s.Struct.SetPtr(1, ss.Struct.ToPtr()) + return ss, err +} + +func (s VnObjectItemsGetResponseMetadataPayload) Items() (VnObjectItemsGetItemPtr_List, error) { + p, err := s.Struct.Ptr(2) + return VnObjectItemsGetItemPtr_List{List: p.List()}, err +} + +func (s VnObjectItemsGetResponseMetadataPayload) HasItems() bool { + p, err := s.Struct.Ptr(2) + return p.IsValid() || err != nil +} + +func (s VnObjectItemsGetResponseMetadataPayload) SetItems(v VnObjectItemsGetItemPtr_List) error { + return s.Struct.SetPtr(2, v.List.ToPtr()) +} + +// NewItems sets the items field to a newly +// allocated VnObjectItemsGetItemPtr_List, preferring placement in s's segment. +func (s VnObjectItemsGetResponseMetadataPayload) NewItems(n int32) (VnObjectItemsGetItemPtr_List, error) { + l, err := NewVnObjectItemsGetItemPtr_List(s.Struct.Segment(), n) + if err != nil { + return VnObjectItemsGetItemPtr_List{}, err + } + err = s.Struct.SetPtr(2, l.List.ToPtr()) + return l, err +} + +// VnObjectItemsGetResponseMetadataPayload_List is a list of VnObjectItemsGetResponseMetadataPayload. +type VnObjectItemsGetResponseMetadataPayload_List struct{ capnp.List } + +// NewVnObjectItemsGetResponseMetadataPayload creates a new list of VnObjectItemsGetResponseMetadataPayload. +func NewVnObjectItemsGetResponseMetadataPayload_List(s *capnp.Segment, sz int32) (VnObjectItemsGetResponseMetadataPayload_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 3}, sz) + return VnObjectItemsGetResponseMetadataPayload_List{l}, err +} + +func (s VnObjectItemsGetResponseMetadataPayload_List) At(i int) VnObjectItemsGetResponseMetadataPayload { + return VnObjectItemsGetResponseMetadataPayload{s.List.Struct(i)} +} + +func (s VnObjectItemsGetResponseMetadataPayload_List) Set(i int, v VnObjectItemsGetResponseMetadataPayload) error { + return s.List.SetStruct(i, v.Struct) +} + +func (s VnObjectItemsGetResponseMetadataPayload_List) String() string { + str, _ := text.MarshalList(0xb4008849dd7a3304, s.List) + return str +} + +// VnObjectItemsGetResponseMetadataPayload_Promise is a wrapper for a VnObjectItemsGetResponseMetadataPayload promised by a client call. 
+type VnObjectItemsGetResponseMetadataPayload_Promise struct{ *capnp.Pipeline } + +func (p VnObjectItemsGetResponseMetadataPayload_Promise) Struct() (VnObjectItemsGetResponseMetadataPayload, error) { + s, err := p.Pipeline.Struct() + return VnObjectItemsGetResponseMetadataPayload{s}, err +} + +func (p VnObjectItemsGetResponseMetadataPayload_Promise) ValueMap() VnObjectAttributeValueMap_Promise { + return VnObjectAttributeValueMap_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +func (p VnObjectItemsGetResponseMetadataPayload_Promise) KeyMap() VnObjectAttributeKeyMap_Promise { + return VnObjectAttributeKeyMap_Promise{Pipeline: p.Pipeline.GetPipeline(1)} +} + +const schema_dfe00955984fcb17 = "x\xda\xac\x94OH\\W\x14\xc6\xcfw\xef\x9by3" + + "0\xad\xf3:\x03\xdaR\x99\xb6t\xa1\x15\xff\xdbEm" + + "e\xdab\xa9S;8\xd7\xb6\x96\xae\xecu\xe6\x82V" + + "\xe7\xcd\xf0\xe6\xd9v\xa4\xe0\xa6B\xbbh\xbb\xb0\x8b\xb6" + + "T\x10B\x88!J\"($d\x11w\x12\xb2\x10\x12" + + "\xb2\x88\x10\x92EB\xfe\xac\x12HH\\\xbdp\xc7q" + + "\x9c\x0c\x09\x01\xcd\xea\xbdw\xee\xc7}\xbf\xef;\xe7\xde" + + "\x8e4\xfb\x98u\xfa\x86\x82Db\xca\xe7\xf7\xb6\xe7\xdd" + + "[[\xad|\x8eD\x0b\x0c\xaf\xfe\xc2\xd0?\xdf\x04\xaf" + + "_#\x1f3\x89\xba\xff0\x86Yd\xd5\xd0\xaf+\xc6" + + "\xb7 \x9c3\xbag\xae&~[\xb3ZP\xa5\xe5Z" + + "\xe0\xf3\xff\xc7\"\xad~\x93(\xd2\xec?I\xf0\x8el" + + "<8\xd6xg\xee,=-\x86\x16_\xf6\xcf\xb0\xc8" + + "NI\xfc\xd0\x1f'x\xa3\xe7G\xdb\xbf\xfb=wQ" + + "c\xb0}u\xe9\xd7\xaf\x9b\x0e\x8b\xf4\x99Z\xfc\x81\xf9" + + "\x13\xc1\xdb|\xe4\xab\x7f\xf3\xb5\xad\xbb5;\x97\x90\x17" + + "L\xc6\"gJ\xe2\xf5\x92\xf8\x97\xf7O\xad\xb4m\xbd" + + "u\xefY\x18o\x07\xde`\x91O\x02Z\xdc\x17\x88\xd3" + + "m\xcf\xceeT{:\x97\x0ddsv\xfb\x88=4" + + "\xf6\x83J\xbb\x09We\x0b\x9f+wX\x15\xf29\xbb" + + "\xa0\xda\xd22o\xe7{\x9f\xb7<\xa0dF\xc1I\x01" + + "\xa2\x81\x1bD\x06\x88\xac\x7f{\x89\xc4\xdf\x1cb\x91\xc1" + + "\x02\xa2\xd0\xc5\x85a\"\xf1?\x87Xb\x00\x8b\x82\x11" + + "YG?%\x12\x8b\x1cb\x99\xc1\xe2\x88\x82\x13Y\xc7" + + "\xbf \x12K\x1cb\x8d\xc12X\x14\x06\x91\xb5\xaa\x95" + + "\xcb\x1c\xe24\x83\xe5\xe3Q\xf8\x88\xacu\xbd\xe5\x1a\x87" + + "\xd8`\x88g\xa53\xa9\x1c\x84\x88!D\xf0\x0aii" + + "\x7f\xe5J\x97\xa0\x10\xf6\xbe\x9f\xdf<\xd1\x18`\xbf\x12" + + "\x01a\xc2\xec\xb8,$s\x8e\x02\x88\x01\x04\xcf\x9e\xce" + + "\x96\x9c\x11\x11\x82\xc4\x10$\xcc\xda\xd3\xd9AU,\xec" + + "}k\xcd\x88\x9c\x9aV\x84\xaaZ9\xc2\xe0!\"L" + + "*Wf\xa4+S\xb28\x95\xe32\xa3\xb3\x0cU\xb2" + + "\xfcL\xa7\xd1\xcf!RUY&u\xc0\x03\x1c\xe2k" + + "\x06\x8b\x95\xc3\x14]D\xe2K\x0e1\xce\xe0\xfd\xa8I" + + "\x932\xaf\x0d\x85\xbd\x9d\x96+7\xff\xdc\xbeq\xbfl" + + "?>\xa9\x8aI\x99G\xd8kz\xfc\xe1;\xab\x97z" + + "\xfe*/\xc4&4\x1b^%\xa48\x10\xde\x1f'\x82" + + ".\xbe\x94\x89\xe9\xd7V\xe3\xda\xeb\xaeU\xa3b\xf5\x15" + + "m5\xc4!\x1a^d\xe0P I\x99\xcf\xab\xcc\xa0" + + "*\xean\xc6TJN\x94\xc67P\xe1h\x1e#\x12" + + "M\x1c\xa2\xa7*\xf2N\x87Htp\x88\x8f\x18\xbc\xdd" + + "\xfc\x126\x99\x19\xf5se\x18*\xc8\xb1\x84]]\xdf" + + "\x835\x0f\x00\xab\x9fT\x83\xf7\x1e\x91x\x97CtT" + + "\xe1\xb5v\x95\x99\xfb\x19\xeal\x99U{\x07!&]" + + "\xd7\xa9ji\xe5\xee\xa9i\xe9A\xe1R\xdcuj\xda" + + "\xa8\xf9\x02\x1c\"\xcaP\xa7\x07\x0a\xe1\xfdKl\xb7\x7f" + + "O\x02\x00\x00\xff\xff\xb4\xc6\x85\xbd" + +func init() { + schemas.Register(schema_dfe00955984fcb17, + 0x85032dcfe77493da, + 0xb4008849dd7a3304, + 0xbb85e91da7f4c0a3, + 0xd26f89592f5fc95f, + 0xeacf121c1705f7c7, + 0xf020cf2eadb0357c) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectItemsScanCookie.capnp 
b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectItemsScanCookie.capnp new file mode 100644 index 00000000..87312483 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectItemsScanCookie.capnp @@ -0,0 +1,16 @@ +@0xb56ec2d13b48b7cb; +using Go = import "/go.capnp"; +$Go.package("node_common_capnp"); +$Go.import("github.com/v3io/v3io-go/internal/schemas/node/common"); + +# Imports & Namespace settings +using Java = import "/java/java.capnp"; +$Java.package("io.iguaz.v3io.daemon.client.api.capnp"); +$Java.outerClassname("VnObjectItemsScanCookieOuter"); + +struct VnObjectItemsScanCookie { + sliceId @0 :UInt16; + inodeNumber @1 :UInt32; + clientSliceListPos @2 :UInt64; + clientSliceListEndPos @3 :UInt64; +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectItemsScanCookie.capnp.go b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectItemsScanCookie.capnp.go new file mode 100644 index 00000000..25be902d --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/schemas/node/common/VnObjectItemsScanCookie.capnp.go @@ -0,0 +1,119 @@ +// Code generated by capnpc-go. DO NOT EDIT. + +package node_common_capnp + +import ( + capnp "zombiezen.com/go/capnproto2" + text "zombiezen.com/go/capnproto2/encoding/text" + schemas "zombiezen.com/go/capnproto2/schemas" +) + +type VnObjectItemsScanCookie struct{ capnp.Struct } + +// VnObjectItemsScanCookie_TypeID is the unique identifier for the type VnObjectItemsScanCookie. +const VnObjectItemsScanCookie_TypeID = 0x8402081dabc79360 + +func NewVnObjectItemsScanCookie(s *capnp.Segment) (VnObjectItemsScanCookie, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 24, PointerCount: 0}) + return VnObjectItemsScanCookie{st}, err +} + +func NewRootVnObjectItemsScanCookie(s *capnp.Segment) (VnObjectItemsScanCookie, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 24, PointerCount: 0}) + return VnObjectItemsScanCookie{st}, err +} + +func ReadRootVnObjectItemsScanCookie(msg *capnp.Message) (VnObjectItemsScanCookie, error) { + root, err := msg.RootPtr() + return VnObjectItemsScanCookie{root.Struct()}, err +} + +func (s VnObjectItemsScanCookie) String() string { + str, _ := text.Marshal(0x8402081dabc79360, s.Struct) + return str +} + +func (s VnObjectItemsScanCookie) SliceId() uint16 { + return s.Struct.Uint16(0) +} + +func (s VnObjectItemsScanCookie) SetSliceId(v uint16) { + s.Struct.SetUint16(0, v) +} + +func (s VnObjectItemsScanCookie) InodeNumber() uint32 { + return s.Struct.Uint32(4) +} + +func (s VnObjectItemsScanCookie) SetInodeNumber(v uint32) { + s.Struct.SetUint32(4, v) +} + +func (s VnObjectItemsScanCookie) ClientSliceListPos() uint64 { + return s.Struct.Uint64(8) +} + +func (s VnObjectItemsScanCookie) SetClientSliceListPos(v uint64) { + s.Struct.SetUint64(8, v) +} + +func (s VnObjectItemsScanCookie) ClientSliceListEndPos() uint64 { + return s.Struct.Uint64(16) +} + +func (s VnObjectItemsScanCookie) SetClientSliceListEndPos(v uint64) { + s.Struct.SetUint64(16, v) +} + +// VnObjectItemsScanCookie_List is a list of VnObjectItemsScanCookie. +type VnObjectItemsScanCookie_List struct{ capnp.List } + +// NewVnObjectItemsScanCookie creates a new list of VnObjectItemsScanCookie. 
+func NewVnObjectItemsScanCookie_List(s *capnp.Segment, sz int32) (VnObjectItemsScanCookie_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 24, PointerCount: 0}, sz) + return VnObjectItemsScanCookie_List{l}, err +} + +func (s VnObjectItemsScanCookie_List) At(i int) VnObjectItemsScanCookie { + return VnObjectItemsScanCookie{s.List.Struct(i)} +} + +func (s VnObjectItemsScanCookie_List) Set(i int, v VnObjectItemsScanCookie) error { + return s.List.SetStruct(i, v.Struct) +} + +func (s VnObjectItemsScanCookie_List) String() string { + str, _ := text.MarshalList(0x8402081dabc79360, s.List) + return str +} + +// VnObjectItemsScanCookie_Promise is a wrapper for a VnObjectItemsScanCookie promised by a client call. +type VnObjectItemsScanCookie_Promise struct{ *capnp.Pipeline } + +func (p VnObjectItemsScanCookie_Promise) Struct() (VnObjectItemsScanCookie, error) { + s, err := p.Pipeline.Struct() + return VnObjectItemsScanCookie{s}, err +} + +const schema_b56ec2d13b48b7cb = "x\xdal\xce\xb1J+A\x1c\x85\xf1s\xfe\x93\xdcI" + + "\xba\xccMZ\xd1\xda\xc2 v\xda\x88\"\x18\x90\x98!" + + "`ea2;\xe0hvfq\xd7B\x08\xd8X\xd8" + + "\xdb\xf9\x04\x82\x9d \xf66)\xac|\x0a\x1fc%\x95" + + "M\xda\xef\xd7|\x9d\x97}\xd9n.\x04\xb0\x1b\xcd\x7f" + + "\xf5\xc5\xd3\xe2u\xad%\x0f\xb0\x9bT\xf5\xd7\xc7\xf1\xde" + + "\xf7g|GC\x03;\x03\xfe\x97n\xa0\x06\xba\x9e?" + + "\x18\xd61e\xbe\xefR\xae\xf3\x14\xfbg\xf1tz\xe5" + + "]5\xa8|^\x8e\xdd$\x1e\xa6t\x1d\xfc\x96\x9b\x14" + + "\xb1\xd8]\xad\xf4#\xd2vT\x03h\x100\x93\x03\xc0" + + "\x9e+\xdaK\xa1!{\\F?\x05l\xa6h\x0b\xa1" + + "\x11\xf6(\x80\xc9\x9f\x01[(\xda\xb9\xd0(\xe9Q\x01" + + "\xe6\xee\x0d\xb0sE\xfb(\xbc/g\xc1\xf9AF\x0d" + + "\xa1\x06\xeb\xb0\xbc\x1d\xde\xe6\xd0S\x7f\xc3\x16\x84-\xb0" + + "v\xb3\xe0c5\x9e18\x7f\x12\xcaj\xa4R\xc96" + + "\x84\xed\x15x\xb4\x1e\xb3\xd1\x9f\xff\x06\x00\x00\xff\xff\xbc" + + "\xd4O\xa2" + +func init() { + schemas.Register(schema_b56ec2d13b48b7cb, + 0x8402081dabc79360) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/session.go b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/session.go new file mode 100644 index 00000000..7d67e43e --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/session.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The v3io Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v3io + +type Session interface { + + // NewContainer creates a container + NewContainer(*NewContainerInput) (Container, error) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/types.go b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/types.go new file mode 100644 index 00000000..b6a320a2 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/dataplane/types.go @@ -0,0 +1,385 @@ +/* +Copyright 2018 The v3io Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v3io + +import ( + "context" + "encoding/xml" + "os" + "strconv" + "strings" + "time" +) + +// +// Control plane +// + +type NewSessionInput struct { + URL string + Username string + Password string + AccessKey string +} + +type NewContainerInput struct { + ContainerName string +} + +// +// Data plane +// + +type DataPlaneInput struct { + Ctx context.Context + URL string + ContainerName string + AuthenticationToken string + AccessKey string + Timeout time.Duration +} + +type DataPlaneOutput struct { + ctx context.Context +} + +// +// Container +// + +type GetClusterMDInput struct { + DataPlaneInput +} +type GetClusterMDOutput struct { + DataPlaneOutput + NumberOfVNs int +} + +type GetContainerContentsInput struct { + DataPlaneInput + Path string + GetAllAttributes bool // if "true" return ALL available attributes + DirectoriesOnly bool // if "true" return directory entries only, otherwise return children of any kind + Limit int // max number of entries per request + Marker string // start from specific entry (e.g. to get next chunk) +} + +type Content struct { + Key string `xml:"Key"` + Size *int `xml:"Size"` // file size in bytes + LastSequenceID *int `xml:"LastSequenceId"` // greater than zero for shard files + LastModified string `xml:"LastModified"` // Date in format time.RFC3339: "2019-06-02T14:30:39.18Z" + + Mode FileMode `xml:"Mode"` // octal (ListDir) or decimal (GetItems) base, depends on API, e.g. 33204 or 0100664 + AccessTime string `xml:"AccessTime"` // Date in format time.RFC3339: "2019-06-02T14:30:39.18Z" + CreatingTime string `xml:"CreatingTime"` // Date in format time.RFC3339: "2019-06-02T14:30:39.18Z" + GID string `xml:"GID"` // Hexadecimal representation of GID (e.g. "3e8" -> i.e. "0x3e8" == 1000) + UID string `xml:"UID"` // Hexadecimal representation of UID (e.g. "3e8" -> i.e. "0x3e8" == 1000) + InodeNumber *uint32 `xml:"InodeNumber"` // iNode number +} + +type CommonPrefix struct { + Prefix string `xml:"Prefix"` // directory name + LastModified string `xml:"LastModified"` // Date in format time.RFC3339: "2019-06-02T14:30:39.18Z" + AccessTime string `xml:"AccessTime"` // Date in format time.RFC3339: "2019-06-02T14:30:39.18Z" + CreatingTime string `xml:"CreatingTime"` // Date in format time.RFC3339: "2019-06-02T14:30:39.18Z" + Mode FileMode `xml:"Mode"` // octal number, e.g. 040775 + GID string `xml:"GID"` // Hexadecimal representation of GID (e.g. "3e8" -> i.e. "0x3e8" == 1000) + UID string `xml:"UID"` // Hexadecimal representation of UID (e.g. "3e8" -> i.e. "0x3e8" == 1000) + InodeNumber *uint64 `xml:"InodeNumber"` // iNode number +} + +type FileMode string + +func (vfm FileMode) FileMode() (os.FileMode, error) { + return mode(vfm) +} + +func (vfm FileMode) String() string { + mode, err := vfm.FileMode() + if err != nil { + return "unresolved" + } + return mode.String() +} + +func mode(v3ioFileMode FileMode) (os.FileMode, error) { + const S_IFMT = 0xf000 // nolint: golint + const IP_OFFMASK = 0x1fff // nolint: golint + + // Note, File mode from different API's has different base. 
+ // For example Scan API returns file mode as decimal number (base 10) while ListDir as Octal (base 8) + var sFileMode = string(v3ioFileMode) + if strings.HasPrefix(sFileMode, "0") { + + // Convert octal representation of V3IO into decimal representation of Go + mode, err := strconv.ParseUint(sFileMode, 8, 32) + if err != nil { + return os.FileMode(S_IFMT), err + } + + golangFileMode := ((mode & S_IFMT) << 17) | (mode & IP_OFFMASK) + return os.FileMode(golangFileMode), nil + } + + mode, err := strconv.ParseUint(sFileMode, 10, 32) + if err != nil { + return os.FileMode(S_IFMT), err + } + return os.FileMode(mode), nil +} + +type GetContainerContentsOutput struct { + Name string `xml:"Name"` // Bucket name + NextMarker string `xml:"NextMarker"` // if not empty and isTruncated="true" - has more children (need another fetch to get them) + MaxKeys string `xml:"MaxKeys"` // max number of entries in single batch + Contents []Content `xml:"Contents"` // files + CommonPrefixes []CommonPrefix `xml:"CommonPrefixes"` // directories + IsTruncated bool `xml:"IsTruncated"` // "true" if has more content. Note, "NextMarker" should not be empty if "true" +} + +type GetContainersInput struct { + DataPlaneInput +} + +type GetContainersOutput struct { + DataPlaneOutput + XMLName xml.Name `xml:"ListBucketResult"` + Owner interface{} `xml:"Owner"` + Results Containers `xml:"Buckets"` +} + +type Containers struct { + Name xml.Name `xml:"Buckets"` + Containers []ContainerInfo `xml:"Bucket"` +} + +type ContainerInfo struct { + BucketName xml.Name `xml:"Bucket"` + Name string `xml:"Name"` + CreationDate string `xml:"CreationDate"` + ID int `xml:"Id"` +} + +// +// Object +// + +type GetObjectInput struct { + DataPlaneInput + Path string + Offset int + NumBytes int +} + +type PutObjectInput struct { + DataPlaneInput + Path string + Offset int + Body []byte + Append bool +} + +type DeleteObjectInput struct { + DataPlaneInput + Path string +} + +// +// KV +// + +type PutItemInput struct { + DataPlaneInput + Path string + Condition string + Attributes map[string]interface{} + UpdateMode string +} + +type PutItemOutput struct { + DataPlaneInput + MtimeSecs int + MtimeNSecs int +} + +type PutItemsInput struct { + DataPlaneInput + Path string + Condition string + Items map[string]map[string]interface{} +} + +type PutItemsOutput struct { + DataPlaneOutput + Success bool + Errors map[string]error +} + +type UpdateItemInput struct { + DataPlaneInput + Path string + Attributes map[string]interface{} + Expression *string + Condition string + UpdateMode string +} + +type UpdateItemOutput struct { + DataPlaneInput + MtimeSecs int + MtimeNSecs int +} + +type GetItemInput struct { + DataPlaneInput + Path string + AttributeNames []string +} + +type GetItemOutput struct { + DataPlaneOutput + Item Item +} + +type GetItemsInput struct { + DataPlaneInput + Path string + TableName string + AttributeNames []string + Filter string + Marker string + ShardingKey string + Limit int + Segment int + TotalSegments int + SortKeyRangeStart string + SortKeyRangeEnd string + RequestJSONResponse bool `json:"RequestJsonResponse"` +} + +type GetItemsOutput struct { + DataPlaneOutput + Last bool + NextMarker string + Items []Item +} + +// +// Stream +// + +type StreamRecord struct { + ShardID *int + Data []byte + ClientInfo []byte + PartitionKey string + SequenceNumber uint64 +} + +type SeekShardInputType int + +const ( + SeekShardInputTypeTime SeekShardInputType = iota + SeekShardInputTypeSequence + SeekShardInputTypeLatest + SeekShardInputTypeEarliest 
+) + +type CreateStreamInput struct { + DataPlaneInput + Path string + ShardCount int + RetentionPeriodHours int +} + +type CheckPathExistsInput struct { + DataPlaneInput + Path string +} + +type DescribeStreamInput struct { + DataPlaneInput + Path string +} + +type DescribeStreamOutput struct { + DataPlaneOutput + ShardCount int + RetentionPeriodHours int +} + +type DeleteStreamInput struct { + DataPlaneInput + Path string +} + +type PutRecordsInput struct { + DataPlaneInput + Path string + Records []*StreamRecord +} + +type PutRecordResult struct { + SequenceNumber uint64 + ShardID int `json:"ShardId"` + ErrorCode int + ErrorMessage string +} + +type PutRecordsOutput struct { + DataPlaneOutput + FailedRecordCount int + Records []PutRecordResult +} + +type SeekShardInput struct { + DataPlaneInput + Path string + Type SeekShardInputType + StartingSequenceNumber uint64 + Timestamp int +} + +type SeekShardOutput struct { + DataPlaneOutput + Location string +} + +type GetRecordsInput struct { + DataPlaneInput + Path string + Location string + Limit int +} + +type GetRecordsResult struct { + ArrivalTimeSec int + ArrivalTimeNSec int + SequenceNumber uint64 + ClientInfo []byte + PartitionKey string + Data []byte +} + +type GetRecordsOutput struct { + DataPlaneOutput + NextLocation string + MSecBehindLatest int + RecordsBehindLatest int + Records []GetRecordsResult +} diff --git a/functions/query/vendor/github.com/v3io/v3io-go/pkg/errors/errors.go b/functions/query/vendor/github.com/v3io/v3io-go/pkg/errors/errors.go new file mode 100644 index 00000000..103f7f32 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-go/pkg/errors/errors.go @@ -0,0 +1,30 @@ +package v3ioerrors + +import ( + "errors" +) + +var ErrInvalidTypeConversion = errors.New("Invalid type conversion") +var ErrNotFound = errors.New("Not found") +var ErrStopped = errors.New("Stopped") +var ErrTimeout = errors.New("Timed out") + +type ErrorWithStatusCode struct { + error + statusCode int +} + +func NewErrorWithStatusCode(err error, statusCode int) ErrorWithStatusCode { + return ErrorWithStatusCode{ + error: err, + statusCode: statusCode, + } +} + +func (e ErrorWithStatusCode) StatusCode() int { + return e.statusCode +} + +func (e ErrorWithStatusCode) Error() string { + return e.error.Error() +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/.gitignore b/functions/query/vendor/github.com/v3io/v3io-tsdb/.gitignore new file mode 100644 index 00000000..1fe5870c --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/.gitignore @@ -0,0 +1,8 @@ +*.class +*.iml +*.log +.DS_Store +*.sln.iml +.idea/ + +tsdbctl-* diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/.travis.yml b/functions/query/vendor/github.com/v3io/v3io-tsdb/.travis.yml new file mode 100644 index 00000000..44baf85d --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - "1.14.x" + +env: + - GO111MODULE=on TSDB_TEST_TABLE_PATH="TSDB_INTEGRATION_TESTS/$TRAVIS_BUILD_NUMBER" + +script: + - make test + - make lint + - V3IO_TSDB_CONFIG="$TRAVIS_BUILD_DIR/test/ci_v3io.yaml" make integration + - V3IO_TSDB_CONFIG="$TRAVIS_BUILD_DIR/test/ci_v3io_bench.yaml" TSDB_BENCH_INGEST_CONFIG="$TRAVIS_BUILD_DIR/test/benchmark/testdata/tsdb-bench-test-config-ci.yaml" make bench diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/Jenkinsfile b/functions/query/vendor/github.com/v3io/v3io-tsdb/Jenkinsfile new file mode 100644 index 00000000..dec17c14 --- /dev/null +++ 
b/functions/query/vendor/github.com/v3io/v3io-tsdb/Jenkinsfile @@ -0,0 +1,529 @@ +label = "${UUID.randomUUID().toString()}" +BUILD_FOLDER = "/home/jenkins/go" +attempts=15 +git_project = "v3io-tsdb" +git_project_user = "v3io" +git_project_upstream_user = "v3io" +git_deploy_user = "iguazio-prod-git-user" +git_deploy_user_token = "iguazio-prod-git-user-token" +git_deploy_user_private_key = "iguazio-prod-git-user-private-key" + + +def build_nuclio(V3IO_TSDB_VERSION, internal_status="stable") { + withCredentials([ + usernamePassword(credentialsId: git_deploy_user, passwordVariable: 'GIT_PASSWORD', usernameVariable: 'GIT_USERNAME'), + string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') + ]) { + def git_project = 'tsdb-nuclio' + stage('prepare sources') { + container('jnlp') { + if (!fileExists("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}")) { + sh("cd ${BUILD_FOLDER}; git clone https://${GIT_TOKEN}@github.com/${git_project_user}/${git_project}.git src/github.com/${git_project_upstream_user}/${git_project}") + } + if ("${internal_status}" == "unstable") { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh("git stash") + sh("git checkout development") + } + } else { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh("git stash") + sh("git checkout master") + } + } + } + parallel( + 'update tsdb in ingest': { + container('jnlp') { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh """ + rm -rf functions/ingest/vendor/github.com/${git_project_upstream_user}/v3io-tsdb + git clone https://${GIT_TOKEN}@github.com/${git_project_user}/v3io-tsdb.git functions/ingest/vendor/github.com/${git_project_upstream_user}/v3io-tsdb + cd functions/ingest/vendor/github.com/${git_project_upstream_user}/v3io-tsdb + git checkout ${V3IO_TSDB_VERSION} + """ + } + } + container('golang') { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}/functions/ingest/vendor/github.com/${git_project_upstream_user}/v3io-tsdb") { + sh """ + GO111MODULE=on go mod vendor + rm -rf .git vendor/github.com/nuclio vendor/github.com/${git_project_upstream_user}/frames/vendor/golang.org/x/net vendor/golang.org/x/net + """ + sh("chown 1000:1000 ./ -R") + } + } + }, + 'update tsdb in query': { + container('jnlp') { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh """ + rm -rf functions/query/vendor/github.com/${git_project_upstream_user}/v3io-tsdb functions/query/vendor/github.com/${git_project_upstream_user}/v3io-go + git clone https://${GIT_TOKEN}@github.com/${git_project_user}/v3io-tsdb.git functions/query/vendor/github.com/${git_project_upstream_user}/v3io-tsdb + cd functions/query/vendor/github.com/${git_project_upstream_user}/v3io-tsdb + git checkout ${V3IO_TSDB_VERSION} + """ + } + } + container('golang') { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}/functions/query/vendor/github.com/${git_project_upstream_user}/v3io-tsdb") { + sh """ + GO111MODULE=on go mod vendor + rm -rf .git vendor/github.com/nuclio vendor/github.com/${git_project_upstream_user}/frames/vendor/golang.org/x/net vendor/golang.org/x/net + mv vendor/github.com/v3io/v3io-go ${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}/functions/query/vendor/github.com/${git_project_upstream_user}/v3io-go + """ + sh("chown 1000:1000 ./ -R") + } + } + } + ) + } + + stage('git push') { + 
container('jnlp') { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh """ + git config --global user.email '${GIT_USERNAME}@iguazio.com' + git config --global user.name '${GIT_USERNAME}' + git remote rm origin + git remote add origin https://${GIT_USERNAME}:${GIT_TOKEN}@github.com/${git_project_user}/${git_project}.git + git add functions/ingest/vendor/github.com functions/query/vendor/github.com; + """ + try { + common.shellc("git commit -m 'Updated TSDB to ${V3IO_TSDB_VERSION}'") + } catch (err) { + echo "Can not commit" + } + try { + if ( "${internal_status}" == "unstable" ) { + common.shellc("git push origin development") + } else { + common.shellc("git push origin master") + } + } catch (err) { + echo "Can not push code" + } + } + } + container('golang') { + sh("rm -rf ${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") + } + } + } +} + +def build_prometheus(V3IO_TSDB_VERSION, FRAMES_VERSION, internal_status="stable") { + withCredentials([ + usernamePassword(credentialsId: git_deploy_user, passwordVariable: 'GIT_PASSWORD', usernameVariable: 'GIT_USERNAME'), + string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') + ]) { + def git_project = 'prometheus' + + stage('prepare sources') { + container('jnlp') { + if (!fileExists("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}")) { + sh("cd ${BUILD_FOLDER}; git clone https://${GIT_TOKEN}@github.com/${git_project_user}/${git_project}.git src/github.com/${git_project_upstream_user}/${git_project}") + } + if ("${internal_status}" == "unstable") { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh("git stash") + sh("git checkout development") + } + } else { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh("git stash") + sh("git checkout master") + } + } + } + container('golang') { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + if("${git_project_user}" != "${git_project_upstream_user}") { + sh("GO111MODULE=on go mod edit -replace github.com/${git_project_upstream_user}/v3io-tsdb=github.com/${git_project_user}/v3io-tsdb@${V3IO_TSDB_VERSION}") + sh("GO111MODULE=on go mod edit -replace github.com/${git_project_upstream_user}/frames=github.com/${git_project_user}/frames@${FRAMES_VERSION}") + sh("GO111MODULE=on go get") + } else { + sh("GO111MODULE=on go mod edit -replace github.com/${git_project_upstream_user}/v3io-tsdb=github.com/${git_project_upstream_user}/v3io-tsdb@${V3IO_TSDB_VERSION}") + sh("GO111MODULE=on go mod edit -replace github.com/${git_project_upstream_user}/frames=github.com/${git_project_upstream_user}/frames@${FRAMES_VERSION}") + } + sh("GO111MODULE=on go mod vendor") + sh("chown 1000:1000 ./ -R") + } + } + } + + stage('git push') { + container('jnlp') { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh """ + git config --global user.email '${GIT_USERNAME}@iguazio.com' + git config --global user.name '${GIT_USERNAME}' + git remote rm origin + git remote add origin https://${GIT_USERNAME}:${GIT_TOKEN}@github.com/${git_project_user}/${git_project}.git + git add go.mod go.sum vendor/modules.txt vendor; + """ + try { + common.shellc("git commit -m 'Updated TSDB to ${V3IO_TSDB_VERSION}'") + } catch (err) { + echo "Can not commit" + } + try { + if ( "${internal_status}" == "unstable" ) { + common.shellc("git push origin development") + } else { + common.shellc("git push 
origin master") + } + } catch (err) { + echo "Can not push code" + } + } + } + container('golang') { + sh("rm -rf ${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") + } + } + } +} + +def build_frames(V3IO_TSDB_VERSION, internal_status="stable") { + withCredentials([ + usernamePassword(credentialsId: git_deploy_user, passwordVariable: 'GIT_PASSWORD', usernameVariable: 'GIT_USERNAME'), + string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') + ]) { + def git_project = 'frames' + + stage('prepare sources') { + container('jnlp') { + if (!fileExists("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}")) { + sh("cd ${BUILD_FOLDER}; git clone https://${GIT_TOKEN}@github.com/${git_project_user}/${git_project}.git src/github.com/${git_project_upstream_user}/${git_project}") + } + if ("${internal_status}" == "unstable") { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh("git stash") + sh("git checkout development") + } + } else { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh("git stash") + sh("git checkout master") + } + } + } + container('golang') { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + if("${git_project_user}" != "${git_project_upstream_user}") { + sh("GO111MODULE=on go mod edit -replace github.com/${git_project_upstream_user}/v3io-tsdb=github.com/${git_project_user}/v3io-tsdb@${V3IO_TSDB_VERSION}") + sh("GO111MODULE=on go get") + } else { + sh("GO111MODULE=on go get github.com/${git_project_user}/v3io-tsdb@${V3IO_TSDB_VERSION}") + } + sh("chown 1000:1000 ./ -R") + } + } + } + + stage('git push') { + container('jnlp') { + dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { + sh """ + git config --global user.email '${GIT_USERNAME}@iguazio.com' + git config --global user.name '${GIT_USERNAME}' + git remote rm origin + git remote add origin https://${GIT_USERNAME}:${GIT_TOKEN}@github.com/${git_project_user}/${git_project}.git + git add go.mod go.sum + """ + try { + common.shellc("git commit -m 'Updated TSDB to ${V3IO_TSDB_VERSION}'") + } catch (err) { + echo "Can not commit" + } + try { + if ( "${internal_status}" == "unstable" ) { + common.shellc("git push origin development") + } else { + common.shellc("git push origin master") + } + } catch (err) { + echo "Can not push code" + } + } + } + container('golang') { + sh("rm -rf ${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") + } + } + } +} + +def wait_for_release(V3IO_TSDB_VERSION, next_versions, tasks_list) { + withCredentials([ + string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') + ]) { + if (V3IO_TSDB_VERSION != "unstable") { + stage('waiting for prereleases moved to releases') { + container('jnlp') { + i = 0 + def success_count = 0 + + while (true) { + sleep(60) + + def done_count = 0 + + echo "attempt #${i}" + tasks_list.each { project, status -> + if (status == null) { + def RELEASE_SUCCESS = sh( + script: "curl --silent -H \"Content-Type: application/json\" -H \"Authorization: token ${GIT_TOKEN}\" -X GET https://api.github.com/repos/${git_project_user}/${project}/releases/tags/${next_versions[project]} | python -c 'import json,sys;obj=json.load(sys.stdin);print obj[\"prerelease\"]' | if grep -iq false; then echo 'release'; else echo 'prerelease'; fi", + returnStdout: true + ).trim() + + echo "${project} is ${RELEASE_SUCCESS}" + if (RELEASE_SUCCESS != null && 
RELEASE_SUCCESS == 'release') { + tasks_list.putAt(project, true) + done_count++ + success_count++ + } else { + def TAG_SHA = sh( + script: "curl --silent -H \"Content-Type: application/json\" -H \"Authorization: token ${GIT_TOKEN}\" -X GET https://api.github.com/repos/${git_project_user}/${project}/git/refs/tags/${next_versions[project]} | python -c 'import json,sys;obj=json.load(sys.stdin);print obj[\"object\"][\"sha\"]'", + returnStdout: true + ).trim() + + if (TAG_SHA != null) { + def COMMIT_STATUS = sh( + script: "curl --silent -H \"Content-Type: application/json\" -H \"Authorization: token ${GIT_TOKEN}\" -X GET https://api.github.com/repos/${git_project_user}/${project}/commits/${TAG_SHA}/statuses | python -c 'import json,sys;obj=json.load(sys.stdin);print obj[0][\"state\"]' | if grep -iq error; then echo 'error'; else echo 'ok'; fi", + returnStdout: true + ).trim() + if (COMMIT_STATUS != null && COMMIT_STATUS == 'error') { + tasks_list.putAt(project, false) + done_count++ + } + } + } + } else { + done_count++ + } + } + if (success_count >= tasks_list.size()) { + echo "all releases have been successfully completed" + break + } + + if (done_count >= tasks_list.size() || i++ > attempts) { + def failed = [] + def notcompleted = [] + def error_string = '' + tasks_list.each { project, status -> + if (status == null) { + notcompleted += project + } else if (status == false) { + failed += project + } + } + if (failed.size()) { + error_string += failed.join(',') + ' have been failed :_(. ' + } + if (notcompleted.size()) { + error_string += notcompleted.join(',') + ' have been not completed :(. ' + } + error(error_string) + break + } + } + } + } + } else { + stage('info') { + echo("Unstable tsdb doesn't trigger tsdb-nuclio and prometheus") + } + } + } +} + +podTemplate(label: "${git_project}-${label}", inheritFrom: "jnlp-docker-golang") { + def MAIN_TAG_VERSION + def FRAMES_NEXT_VERSION + def next_versions = ['prometheus':null, 'tsdb-nuclio':null, 'frames':null] + + pipelinex = library(identifier: 'pipelinex@development', retriever: modernSCM( + [$class: 'GitSCMSource', + credentialsId: git_deploy_user_private_key, + remote: "git@github.com:iguazio/pipelinex.git"])).com.iguazio.pipelinex + + common.notify_slack { + node("${git_project}-${label}") { + withCredentials([ + string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') + ]) { + stage('get tag data') { + container('jnlp') { + MAIN_TAG_VERSION = github.get_tag_version(TAG_NAME) + + echo "$MAIN_TAG_VERSION" + } + } + + if (github.check_tag_expiration(git_project, git_project_user, MAIN_TAG_VERSION, GIT_TOKEN)) { + parallel( + 'tsdb-nuclio': { + podTemplate(label: "v3io-tsdb-nuclio-${label}", inheritFrom: "jnlp-docker-golang") { + node("v3io-tsdb-nuclio-${label}") { + withCredentials([ + string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') + ]) { + def NEXT_VERSION + + if (MAIN_TAG_VERSION != "unstable") { + stage('get previous release version') { + container('jnlp') { + NEXT_VERSION = github.get_next_short_tag_version("tsdb-nuclio", git_project_user, GIT_TOKEN) + next_versions.putAt("tsdb-nuclio", NEXT_VERSION) + } + } + + build_nuclio(MAIN_TAG_VERSION, "unstable") + build_nuclio(MAIN_TAG_VERSION) + + stage('create tsdb-nuclio prerelease') { + container('jnlp') { + // development has been triggered when committed to it in github-webhook nuclio function + // echo "Triggered tsdb-nuclio development will be builded with last tsdb stable version" + // github.delete_release("tsdb-nuclio", git_project_user, "unstable", 
GIT_TOKEN) + // github.create_prerelease("tsdb-nuclio", git_project_user, "unstable", GIT_TOKEN, "development") + + echo "Trigger tsdb-nuclio ${NEXT_VERSION} with tsdb ${MAIN_TAG_VERSION}" + github.create_prerelease("tsdb-nuclio", git_project_user, NEXT_VERSION, GIT_TOKEN) + } + } + } else { + stage('info') { + echo("Unstable tsdb doesn't trigger tsdb-nuclio") + } + } + } + } + } + }, + 'frames': { + podTemplate(label: "v3io-frames-${label}", inheritFrom: "jnlp-docker-golang") { + node("v3io-frames-${label}") { + withCredentials([ + string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') + ]) { + def NEXT_VERSION + + if (MAIN_TAG_VERSION != "unstable") { + stage('get previous release version') { + container('jnlp') { + NEXT_VERSION = github.get_next_short_tag_version("frames", git_project_user, GIT_TOKEN) + FRAMES_NEXT_VERSION = NEXT_VERSION + next_versions.putAt("frames", NEXT_VERSION) + } + } + + build_frames(MAIN_TAG_VERSION, "unstable") + build_frames(MAIN_TAG_VERSION) + + stage('create frames prerelease') { + container('jnlp') { + // development has been triggered when committed to it in github-webhook nuclio function + // echo "Triggered frames development will be builded with last tsdb stable version" + // github.delete_release("frames", git_project_user, "unstable", GIT_TOKEN) + // github.create_prerelease("frames", git_project_user, "unstable", GIT_TOKEN, "development") + + echo "Trigger frames ${NEXT_VERSION} with tsdb ${MAIN_TAG_VERSION}" + github.create_prerelease("frames", git_project_user, NEXT_VERSION, GIT_TOKEN) + } + } + } else { + stage('info') { + echo("Unstable tsdb doesn't trigger frames") + } + } + } + } + } + } + ) + } + } + } + + node("${git_project}-${label}") { + wait_for_release(MAIN_TAG_VERSION, next_versions, ['tsdb-nuclio': null, 'frames': null]) + } + + // prometheus moved last cos need frames version to build + podTemplate(label: "v3io-tsdb-prometheus-${label}", inheritFrom: "jnlp-docker-golang") { + node("v3io-tsdb-prometheus-${label}") { + withCredentials([ + string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') + ]) { + def TAG_VERSION + def NEXT_VERSION + + if (MAIN_TAG_VERSION != "unstable") { + stage('get current version') { + container('jnlp') { + sh """ + cd ${BUILD_FOLDER} + git clone https://${GIT_TOKEN}@github.com/${git_project_user}/prometheus.git src/github.com/prometheus/prometheus + """ + + TAG_VERSION = sh( + script: "cat ${BUILD_FOLDER}/src/github.com/prometheus/prometheus/VERSION", + returnStdout: true + ).trim() + } + } + + if (TAG_VERSION) { + stage('get previous release version') { + container('jnlp') { + NEXT_VERSION = github.get_next_short_tag_version("prometheus", git_project_user, GIT_TOKEN) + echo "$NEXT_VERSION" + next_versions.putAt('prometheus', NEXT_VERSION) + } + } + + build_prometheus(MAIN_TAG_VERSION, FRAMES_NEXT_VERSION, "unstable") + build_prometheus(MAIN_TAG_VERSION, FRAMES_NEXT_VERSION) + + stage('create prometheus prerelease') { + container('jnlp') { + // development has been triggered when committed to it in github-webhook nuclio function + // echo "Triggered prometheus development will be builded with last tsdb stable version" + // github.delete_release("prometheus", git_project_user, "unstable", GIT_TOKEN) + // github.create_prerelease("prometheus", git_project_user, "unstable", GIT_TOKEN, "development") + + echo "Trigger prometheus ${NEXT_VERSION} with tsdb ${MAIN_TAG_VERSION}" + github.create_prerelease("prometheus", git_project_user, NEXT_VERSION, GIT_TOKEN) + } + } + } + } else { + 
stage('info') { + echo("Unstable tsdb doesn't trigger prometheus") + } + } + } + } + } + + node("${git_project}-${label}") { + wait_for_release(MAIN_TAG_VERSION, next_versions, ['prometheus': null]) + } + + node("${git_project}-${label}") { + withCredentials([ + string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') + ]) { + stage('update release status') { + container('jnlp') { + github.update_release_status(git_project, git_project_user, "${MAIN_TAG_VERSION}", GIT_TOKEN) + } + } + } + } + } +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/LICENSE b/functions/query/vendor/github.com/v3io/v3io-tsdb/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/Makefile b/functions/query/vendor/github.com/v3io/v3io-tsdb/Makefile new file mode 100644 index 00000000..ebf9b9c1 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/Makefile @@ -0,0 +1,106 @@ +GIT_COMMIT_HASH := $(shell git rev-parse HEAD) +GIT_BRANCH=$(shell git rev-parse --abbrev-ref HEAD) +ifeq ($(GIT_BRANCH),) + GIT_BRANCH="N/A" +endif + +ifneq ($(TSDB_LABEL),) + GIT_REVISION := $(TSDB_LABEL) +else + GIT_REVISION := $(shell git describe --always) +endif + +GOOS ?= $(shell go env GOOS) +GOARCH ?= $(shell go env GOARCH) +GOPATH ?= $(shell go env GOPATH) + +TSDBCTL_BIN_NAME := tsdbctl-$(GIT_REVISION)-$(GOOS)-$(GOARCH) + +# Use RFC3339 (ISO8601) date format +BUILD_TIME := $(shell date -u +"%Y-%m-%dT%H:%M:%SZ") + +# Use fully qualified package name +CONFIG_PKG=github.com/v3io/v3io-tsdb/pkg/config + +# Use Go linker to set the build metadata +BUILD_OPTS := -ldflags " \ + -X $(CONFIG_PKG).buildTime=$(BUILD_TIME) \ + -X $(CONFIG_PKG).osys=$(GOOS) \ + -X $(CONFIG_PKG).architecture=$(GOARCH) \ + -X $(CONFIG_PKG).version=$(GIT_REVISION) \ + -X $(CONFIG_PKG).commitHash=$(GIT_COMMIT_HASH) \ + -X $(CONFIG_PKG).branch=$(GIT_BRANCH)" \ + -v -o "$(GOPATH)/bin/$(TSDBCTL_BIN_NAME)" + +TSDB_BUILD_COMMAND ?= GO111MODULE="on" CGO_ENABLED=0 go build $(BUILD_OPTS) ./cmd/tsdbctl + +.PHONY: fmt +fmt: + gofmt -l -s -w . + +.PHONY: get +get: + GO111MODULE="on" go mod tidy + +.PHONY: test +test: + go test -v -race -tags unit -count 1 ./... + +.PHONY: integration +integration: + go test -v -race -tags integration -p 1 -count 1 ./... # p=1 to force Go to run pkg tests serially. + +.PHONY: bench +bench: + go test -run=XXX -bench='^BenchmarkIngest$$' -benchtime 10s -timeout 5m ./test/benchmark/... + +.PHONY: build +build: + docker run \ + --volume $(shell pwd):/go/src/github.com/v3io/v3io-tsdb \ + --volume $(shell pwd):/go/bin \ + --workdir /go/src/github.com/v3io/v3io-tsdb \ + --env GOOS=$(GOOS) \ + --env GOARCH=$(GOARCH) \ + golang:1.12 \ + make bin + +.PHONY: bin +bin: + ${TSDB_BUILD_COMMAND} + +PHONY: gofmt +gofmt: +ifeq ($(shell gofmt -l .),) + # gofmt OK +else + $(error Please run `go fmt ./...` to format the code) +endif + +.PHONY: impi +impi: + @echo Installing impi... + GO111MODULE=off go get -u github.com/pavius/impi/cmd/impi + @echo Verifying imports... 
+	$(GOPATH)/bin/impi \
+		--local github.com/iguazio/provazio \
+		--skip pkg/controller/apis \
+		--skip pkg/controller/client \
+		--ignore-generated \
+		--scheme stdLocalThirdParty \
+		./...
+
+$(GOPATH)/bin/golangci-lint:
+	@echo Installing golangci-lint...
+	curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s v1.10.2
+	cp ./bin/golangci-lint $(GOPATH)/bin/
+
+.PHONY: lint
+lint: gofmt impi $(GOPATH)/bin/golangci-lint
+	@echo Linting...
+	@$(GOPATH)/bin/golangci-lint run \
+	    --disable-all --enable=deadcode --enable=goconst --enable=golint --enable=ineffassign \
+	    --enable=interfacer --enable=unconvert --enable=varcheck --enable=errcheck --enable=gofmt --enable=misspell \
+	    --enable=staticcheck --enable=gosimple --enable=govet --enable=goconst \
+	    cmd/... pkg/... internal/...
+	@echo done linting
diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/README.md b/functions/query/vendor/github.com/v3io/v3io-tsdb/README.md
new file mode 100644
index 00000000..3d77474a
--- /dev/null
+++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/README.md
@@ -0,0 +1,294 @@
+[![Travis Build Status](https://travis-ci.org/v3io/v3io-tsdb.svg?branch=master)](https://travis-ci.org/v3io/v3io-tsdb)
+[![GH Build Status](https://github.com/v3io/v3io-tsdb/workflows/CI/badge.svg)](https://github.com/v3io/v3io-tsdb/actions)
+
+# V3IO-TSDB
+Iguazio API lib for time-series DB access and Prometheus TSDB storage driver.
+
+> Note: This project is still under development; it requires the latest 1.7 release of the Iguazio DB (with Blob functions).
+
+## Overview
+Iguazio provides a real-time, flexible document database engine which accelerates popular big-data and open-source
+frameworks such as Spark and Presto, as well as providing AWS-compatible data APIs (DynamoDB, Kinesis, S3).
+
+The Iguazio DB engine runs at the speed of in-memory databases but uses lower-cost, higher-density (NVMe) flash. It has
+a unique low-level design with highly parallel processing and OS bypass which treats flash as asynchronous memory pages.
+
+The Iguazio DB low-level APIs (v3io) have rich semantics and multiple indexing types, which allow multiple
+workloads and processing engines to run on exactly the same data and to read/write that data consistently from different tools.
+
+This project uses v3io semantics (row & column layouts, arrays, random & sequential indexes, etc.) to provide an extremely
+fast and scalable time-series database engine which can be accessed simultaneously by multiple engines and APIs, such as:
+- [Prometheus](https://prometheus.io/) Time Series DB (for metrics scraping & queries)
+- [nuclio](https://github.com/nuclio/nuclio) serverless functions (for real-time ingestion, stream processing or queries)
+- Iguazio DynamoDB API (with extensions)
+- Apache Presto & Spark (future item, for SQL & AI)
+- Built-in CLI (tsdbctl) for DB creation, ingestion, and queries
+
+[nuclio](https://github.com/nuclio/nuclio) supports HTTP and a large variety of streaming/triggering options (Kafka, Kinesis,
+Azure Event Hub, RabbitMQ, NATS, Iguazio streams, MQTT, cron tasks) and provides automatic deployment and auto-scaling,
+enabling ingestion from a variety of sources at virtually unlimited scale. nuclio functions can be customized to pre-process
+incoming data, e.g., examine metric data, alert, or convert formats.
+
+
+ +![architecture](timeseries.png) +
+
+## Architecture
+The solution stores the raw data in highly compressed column chunks (using a Gorilla/XOR compression variation), with one
+chunk for every n hours (1 hour by default). Queries retrieve and decompress only the specific columns needed for the
+requested time range.
+
+Users can define pre-aggregates (count, avg, sum, min, max, stddev, stdvar, last, rate) which use v3io update expressions and store
+data consistently in arrays per user-defined intervals (RollupMin) and/or dimensions (labels).
+
+![data layout](dataorg.png)
+
+High-resolution queries detect the pre-aggregates automatically and selectively access the array ranges
+(skipping chunk retrieval, decompression, and aggregation), which significantly accelerates searches and provides real-time
+response. An extension supports overlapping aggregates (retrieving the last 1hr, 6hr, 12hr, and 24hr stats in a single request);
+this is currently not possible via the standard Prometheus TSDB API.
+
+The data can be partitioned into multiple tables (e.g. one per week) or use a cyclic table (which goes back to the first chunk after
+it reaches the end); multiple tables are stored in a hierarchy under the specified path.
+
+Metric names and labels are stored in search-optimized keys and string attributes. The Iguazio DB engine can run a full
+dimension scan (search) at a rate of millions of metrics per second, or use selective range-based queries to access
+a specific metric family.
+
+The use of v3io random-access keys (hash-based) allows real-time sample-data ingestion/retrieval and stream processing.
+
+To maintain high performance over low-speed connections, we implement automatic IO throttling: if the link is slow, multiple
+samples are pushed in a single operation, and users can configure the maximum allowed batch (trading efficiency for
+consistency). IO is done using multiple parallel connections/workers, enabling maximum throughput regardless of the
+link latency.
+
+## How To Use
+
+The code is separated into a Prometheus-compliant adapter in [/promtsdb](promtsdb) and a more generic/advanced adapter in
+[/pkg/tsdb](pkg/tsdb); use the latter for custom functions and code. See a full usage example in
+[v3iotsdb_test.go](/pkg/tsdb/v3iotsdb_test.go); both have similar semantics.
+
+For Prometheus you need to use the fork found at `https://github.com/v3io/prometheus`, which already loads this
+library; place a `v3io-tsdb-config.yaml` file with the relevant configuration in the same folder as the Prometheus
+executable (see the configuration details below).
+
+A developer using this library should first create a TSDB; this can be done using the CLI or an API call (`CreateTSDB`)
+which builds the TSDB metadata in the DB. To use the DB, create an adapter using the `NewV3ioAdapter()` method;
+with the adapter you can create an `Appender` for adding samples or a `Querier` for querying the database and retrieving
+a set of metrics or aggregates. See the following sections for details.
+
+You can also run the CLI to add (append) or query the DB. To use the CLI, build the code under [tsdbctl](cmd/tsdbctl);
+it has built-in help. See the following add/query examples:
+
+```
+    # create a DB with an expected ingestion rate of one sample per second and some aggregates (at a 30 min interval)
+    # and cross-label aggregates for "host"
+    tsdbctl create -t --ingestion-rate 1/s -a count,sum,max -i 30m -l label1
+
+    # display DB info with metric names (types)
+    tsdbctl info -t -n
+
+    # append a sample (73.2) to the specified metric type (cpu) + labels at the current time
+    tsdbctl add -t cpu os=win,node=xyz123 -d 73.2
+
+    # display all the CPU metrics for win servers from the last hours, in CSV format
+    tsdbctl query -t cpu -f "os=='win'" -l 1h -o csv
+
+```
+
+For use with a nuclio function, see the function example under [examples/nuclio](examples/nuclio).
+
+## API Walkthrough
+
+### Creating and Configuring a TSDB Adapter
+
+The first step is to create a TSDB. This is done only once per TSDB and generates the required metadata and configuration,
+such as the partitioning strategy, retention, aggregates, etc. It can be done via the CLI or a function call.
+
+```go
+    // Load v3io connection/path details (see YAML below)
+    v3iocfg, err := config.GetOrLoadFromFile("v3io-tsdb-config.yaml")
+    if err != nil {
+        // TODO: handle error
+    }
+
+    // Specify the default DB configuration (can be modified per partition)
+    samplesIngestionRate := "1/s"
+    aggregationGranularity := "1h"
+    aggregatesList := "count,avg,min,max"
+    crossLabel := "label1,label2;label3"
+    schema, err := schema.NewSchema(v3iocfg, samplesIngestionRate, aggregationGranularity, aggregatesList, crossLabel)
+    if err != nil {
+        // TODO: handle error
+    }
+
+    return tsdb.CreateTSDB(v3iocfg, schema)
+```
+
+> If you plan on using pre-aggregation to speed up aggregate queries, you should specify the `Rollups` (function list) and
+`RollupMin` (bucket time in minutes) parameters; the supported aggregation functions are: count, sum, avg, min, max,
+stddev, stdvar.
+
+In order to use the TSDB we need to create an adapter. The `NewV3ioAdapter` function accepts 3
+parameters: the configuration structure, a v3io data container object, and a logger object. The last 2 are optional, in case
+you already have a container and logger (when using nuclio data bindings).
+
+Configuration is specified in a YAML or JSON format, and can be read from a file using `config.GetOrLoadFromFile(path string)`
+or loaded from a local buffer using `config.GetOrLoadFromData(data []byte)`.
+You can see details on the configuration options in the V3IO TSDB [**config.go**](pkg/config/config.go) source file.
+A template configuration file is found at **examples/v3io-tsdb-config.yaml.template**.
+You can use it as a reference for creating your own TSDB configuration file.
+For example:
+
+```yaml
+webApiEndpoint: "192.168.1.100:8081"
+container: "tsdb"
+username: "johnd"
+password: "OpenSesame"
+```
+
+The following is an example of creating an adapter:
+
+```go
+    // create configuration object from file
+    cfg, err := config.GetOrLoadFromFile("v3io-tsdb-config.yaml")
+    if err != nil {
+        // TODO: handle error
+    }
+
+    // create and start a new TSDB adapter
+    adapter, err := tsdb.NewV3ioAdapter(cfg, nil, nil)
+    if err != nil {
+        // TODO: handle error
+    }
+```
+
+### Creating and using an Appender (ingest metrics)
+
+The `Appender` interface is used to ingest metrics data. It has two functions: `Add` and `AddFast`; the latter can be used
+after an initial `Add` (using the reference ID returned by `Add`) to reduce some of the lookup/hash overhead.
+
+Example:
+
+```go
+    // create an Appender interface
+    appender, err := adapter.Appender()
+    if err != nil {
+        panic(err)
+    }
+
+    // create metric labels; the `__name__` label specifies the metric type (e.g. cpu, temperature, ..)
+    // the other labels can be used in searches (filtering or grouping) or aggregations
+    // use utils.LabelsFromStrings(s ...string) for string-list input or utils.LabelsFromMap(m map[string]string) for map input
+    lset := utils.LabelsFromStrings("__name__", "http_req", "method", "post")
+
+    // Add a sample with the current time (in milliseconds) and the value 7.9
+    ref, err := appender.Add(lset, time.Now().Unix()*1000, 7.9)
+    if err != nil {
+        panic(err)
+    }
+
+    // Add a second sample using AddFast and the reference ID returned by Add
+    err = appender.AddFast(nil, ref, time.Now().Unix()*1000+1000, 8.3)
+    if err != nil {
+        panic(err)
+    }
+```
+
+### Creating and using a Querier (read metrics and aggregates)
+
+The `Querier` interface is used to query the database and return one or more metrics. We first need to create a `Querier`;
+once we have one, we can use `Select()`, which returns a list of series (as an iterator object).
+
+Every returned series has two interfaces: `Labels()`, which returns the series or aggregate labels, and `Iterator()`,
+which returns an iterator over the series or aggregate values.
+
+The `Select()` call accepts a `SelectParams` parameter which has the following properties:
+* From (int64) - a timestamp in milliseconds specifying the start time of the query
+* To (int64) - a timestamp in milliseconds specifying the end time of the query
+* Name (string) - optional, comma-separated metric types (e.g. cpu, memory, ..); specifying it accelerates performance (enables range queries)
+* Step (int64) - optional, the step interval in milliseconds used for the aggregation functions or for downsampling raw data
+* Functions (string) - optional, a comma-separated list of aggregation functions, e.g. `"count,sum,avg,stddev"`
+* Filter (string) - optional, a V3IO GetItems filter expression for selecting the desired metrics, e.g. `_name=='http_req'`
+* GroupBy (string) - optional, a comma-separated list of labels to group the results by, e.g. `"method"`
+* RequestedColumns ([]RequestedColumn) - optional, as an alternative to `Name` & `Functions` a user can pass a list of `RequestedColumn` objects that specify which metrics and aggregates to query.
+  Using this API it is possible to query several metrics in the same query.
+
+
+Using `Functions` and `Step` is optional; use them only when you are interested in pre-aggregation and the step is much larger than
+the sampling interval (and preferably equal to or greater than the partition RollupMin interval).
+There are two types of aggregates:
+* aggregates over time - aggregates the data into buckets over a period of time. This will result in a series for every unique label set per aggregate.
+* aggregates across series - aggregates the data for all the different label sets into one series per aggregate. Add an `_all` suffix to the aggregate name to use this kind of aggregation.
+
+In both cases, an `Aggregate` label with the function name is added to the returned series.
+Note that a single query can use aggregates over time **or** aggregates across series, but not both; a minimal sketch of a cross-series query follows below.
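+
+The following is a minimal sketch of a cross-series query, assuming a querier `qry` (created as shown in the next section)
+and placeholder `minTime`/`maxTime` bounds; the `_all` suffix follows the convention described above:
+
+```go
+    // Sketch only: aggregate the "http_req" samples across all label sets,
+    // yielding one output series per requested function ("sum_all", "avg_all").
+    params := &pquerier.SelectParams{Name: "http_req",
+        Functions: "sum_all,avg_all", // "_all" suffix selects cross-series aggregation
+        Step:      1000 * 3600,       // hourly buckets, in milliseconds
+        From:      minTime,
+        To:        maxTime}
+    set, err := qry.Select(params)
+```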
+
+Creating a querier:
+
+```go
+    qry, err := adapter.QuerierV2()
+    if err != nil {
+        panic(err)
+    }
+```
+
+Simple select example (no aggregates):
+```go
+    params := &pquerier.SelectParams{Name: "http_req",
+        Filter: "method=='post'",
+        From:   minTime,
+        To:     maxTime}
+    set, err := qry.Select(params)
+```
+
+Select using aggregates:
+
+```go
+    params := &pquerier.SelectParams{Name: "http_req",
+        Filter:    "method=='post'",
+        From:      minTime,
+        To:        maxTime,
+        Step:      1000 * 3600,
+        Functions: "count,avg,sum,max"}
+    set, err := qry.Select(params)
+```
+
+Select using RequestedColumns:
+
+```go
+    wantedColumns := []pquerier.RequestedColumn{{Metric: "http_req", Function: "avg"},
+        {Metric: "http_req", Function: "count"},
+        {Metric: "http_req", Function: "max"},
+        {Metric: "tcp_req", Function: "avg"}}
+    params := &pquerier.SelectParams{RequestedColumns: wantedColumns,
+        Filter: "method=='post'",
+        From:   minTime,
+        To:     maxTime,
+        Step:   1000 * 3600}
+    set, err := qry.Select(params)
+```
+
+Once we obtain a set using one of the methods above, we can iterate over the set and the individual series as follows:
+
+```go
+    for set.Next() {
+        if set.Err() != nil {
+            panic(set.Err())
+        }
+
+        series := set.At()
+        fmt.Println("\nLabels:", series.Labels())
+        iter := series.Iterator()
+        for iter.Next() {
+            if iter.Err() != nil {
+                panic(iter.Err())
+            }
+
+            t, v := iter.At()
+            fmt.Printf("t=%d,v=%.2f ", t, v)
+        }
+        fmt.Println()
+    }
+```
diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/cmd/tsdbctl/tsdbctl.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/cmd/tsdbctl/tsdbctl.go
new file mode 100644
index 00000000..2b62ab35
--- /dev/null
+++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/cmd/tsdbctl/tsdbctl.go
@@ -0,0 +1,27 @@
+package main
+
+import (
+	"os"
+
+	"github.com/v3io/v3io-tsdb/pkg/tsdbctl"
+)
+
+func main() {
+	if err := Run(); err != nil {
+		os.Exit(1)
+	}
+	os.Exit(0)
+}
+
+func Run() error {
+	rootCmd := tsdbctl.NewRootCommandeer()
+	defer tearDown(rootCmd)
+	return rootCmd.Execute()
+}
+
+func tearDown(cmd *tsdbctl.RootCommandeer) {
+	if cmd.Reporter != nil { // could be nil if it failed during initialization
+		// nolint: errcheck
+		cmd.Reporter.Stop()
+	}
+}
diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/dataorg.png b/functions/query/vendor/github.com/v3io/v3io-tsdb/dataorg.png
new file mode 100644
index 00000000..b34a5532
Binary files /dev/null and b/functions/query/vendor/github.com/v3io/v3io-tsdb/dataorg.png differ
diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/examples/nuclio/ingest/ingest_example.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/examples/nuclio/ingest/ingest_example.go
new file mode 100644
index 00000000..fb9bb1e3
--- /dev/null
+++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/examples/nuclio/ingest/ingest_example.go
@@ -0,0 +1,197 @@
+package main
+
+import (
+	"encoding/json"
+	"os"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/nuclio/nuclio-sdk-go"
+	"github.com/pkg/errors"
+	"github.com/v3io/v3io-tsdb/pkg/config"
+	"github.com/v3io/v3io-tsdb/pkg/tsdb"
+	"github.com/v3io/v3io-tsdb/pkg/utils"
+)
+
+/*
+Example event:
+{
+	"metric": "cpu",
+	"labels": {
+		"dc": "7",
+		"hostname": "mybesthost"
+	},
+	"samples": [
+		{
+			"t": "1532595945142",
+			"v": {
+				"N": 95.2
+			}
+		},
+		{
+			"t": "1532595948517",
+			"v": {
+				"n": 86.8
+			}
+		}
+	]
+}
+*/
+
+type value struct {
+	N float64 `json:"n,omitempty"`
+}
+
+type sample struct {
+	Time  string `json:"t"`
+	Value value  `json:"v"`
+}
+
+type request struct {
+	Metric
string `json:"metric"` + Labels map[string]string `json:"labels,omitempty"` + Samples []sample `json:"samples"` +} + +var tsdbAppender tsdb.Appender +var tsdbAppenderMtx sync.Mutex + +func Handler(context *nuclio.Context, event nuclio.Event) (interface{}, error) { + var request request + + // parse body + if err := json.Unmarshal(event.GetBody(), &request); err != nil { + return "", nuclio.WrapErrBadRequest(err) + } + + if strings.TrimSpace(request.Metric) == "" { + return nil, nuclio.WrapErrBadRequest(errors.New(`request is missing the mandatory 'metric' field`)) + } + + // convert the map[string]string -> []Labels + labels := getLabelsFromRequest(request.Metric, request.Labels) + + var ref uint64 + // iterate over request samples + for _, sample := range request.Samples { + + // if time is not specified assume "now" + if sample.Time == "" { + sample.Time = "now" + } + // convert time string to time int, string can be: now, now-2h, int (unix milisec time), or RFC3339 date string + sampleTime, err := utils.Str2unixTime(sample.Time) + if err != nil { + return "", errors.Wrap(err, "Failed to parse time: "+sample.Time) + } + // append sample to metric + if ref == 0 { + ref, err = tsdbAppender.Add(labels, sampleTime, sample.Value.N) + } else { + err = tsdbAppender.AddFast(labels, ref, sampleTime, sample.Value.N) + } + if err != nil { + return "", errors.Wrap(err, "Failed to add sample") + } + } + + return "", nil +} + +// InitContext runs only once when the function runtime starts +func InitContext(context *nuclio.Context) error { + var err error + + // get configuration from env + tsdbTablePath := os.Getenv("INGEST_V3IO_TSDB_PATH") + if tsdbTablePath == "" { + return errors.New("INGEST_V3IO_TSDB_PATH must be set") + } + + context.Logger.InfoWith("Initializing", "tsdbTablePath", tsdbTablePath) + + // create TSDB appender + err = createTSDBAppender(context, tsdbTablePath) + if err != nil { + return err + } + + return nil +} + +// convert map[string]string -> utils.Labels +func getLabelsFromRequest(metricName string, labelsFromRequest map[string]string) utils.Labels { + + // adding 1 for metric name + labels := make(utils.Labels, 0, len(labelsFromRequest)+1) + + // add the metric name + labels = append(labels, utils.Label{ + Name: "__name__", + Value: metricName, + }) + + for labelKey, labelValue := range labelsFromRequest { + labels = append(labels, utils.Label{ + Name: labelKey, + Value: labelValue, + }) + } + + sort.Sort(labels) + + return labels +} + +func createTSDBAppender(context *nuclio.Context, path string) error { + context.Logger.InfoWith("Creating TSDB appender", "path", path) + + defer tsdbAppenderMtx.Unlock() + tsdbAppenderMtx.Lock() + + if tsdbAppender == nil { + v3ioConfig, err := config.GetOrLoadFromStruct(&config.V3ioConfig{ + TablePath: path, + }) + if err != nil { + return err + } + v3ioUrl := os.Getenv("V3IO_URL") + numWorkersStr := os.Getenv("V3IO_NUM_WORKERS") + var numWorkers int + if len(numWorkersStr) > 0 { + numWorkers, err = strconv.Atoi(numWorkersStr) + if err != nil { + return err + } + } else { + numWorkers = 8 + } + username := os.Getenv("V3IO_USERNAME") + if username == "" { + username = "iguazio" + } + password := os.Getenv("V3IO_PASSWORD") + containerName := os.Getenv("V3IO_CONTAINER") + if containerName == "" { + containerName = "bigdata" + } + container, err := tsdb.NewContainer(v3ioUrl, numWorkers, "", username, password, containerName, context.Logger) + if err != nil { + return err + } + // create adapter once for all contexts + adapter, err := 
tsdb.NewV3ioAdapter(v3ioConfig, container, context.Logger) + if err != nil { + return err + } + tsdbAppender, err = adapter.Appender() + if err != nil { + return err + } + } + + return nil +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/examples/nuclio/query/query_example.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/examples/nuclio/query/query_example.go new file mode 100644 index 00000000..abccd980 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/examples/nuclio/query/query_example.go @@ -0,0 +1,151 @@ +package main + +import ( + "bytes" + "encoding/json" + "os" + "strconv" + "strings" + "sync" + + "github.com/nuclio/nuclio-sdk-go" + "github.com/pkg/errors" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/formatter" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +/* +Example request: +{ + "metric": "cpu", + "step": "1m", + "start_time": "1532095945142", + "end_time": "1642995948517" +} +*/ + +type request struct { + Metric string `json:"metric"` + Aggregators []string `json:"aggregators"` + FilterExpression string `json:"filter_expression"` + Step string `json:"step"` + StartTime string `json:"start_time"` + EndTime string `json:"end_time"` + Last string `json:"last"` +} + +var tsdbQuerier *pquerier.V3ioQuerier +var tsdbQuerierMtx sync.Mutex + +func Handler(context *nuclio.Context, event nuclio.Event) (interface{}, error) { + request := request{} + + // try to unmarshal the request. return bad request if failed + if err := json.Unmarshal(event.GetBody(), &request); err != nil { + return nil, nuclio.WrapErrBadRequest(err) + } + + context.Logger.DebugWith("Got query request", "request", request) + + // convert string times (unix or RFC3339 or relative like now-2h) to unix milisec times + from, to, step, err := utils.GetTimeFromRange(request.StartTime, request.EndTime, request.Last, request.Step) + if err != nil { + return nil, nuclio.WrapErrBadRequest(errors.Wrap(err, "Error parsing query time range")) + } + + params := &pquerier.SelectParams{Name: request.Metric, + Functions: strings.Join(request.Aggregators, ","), + Step: step, + Filter: request.FilterExpression, + From: from, + To: to} + // Select query to get back a series set iterator + seriesSet, err := tsdbQuerier.Select(params) + if err != nil { + return nil, errors.Wrap(err, "Failed to execute query select") + } + + // convert SeriesSet to JSON (Grafana simpleJson format) + jsonFormatter, err := formatter.NewFormatter("json", nil) + if err != nil { + return nil, errors.Wrap(err, "failed to start json formatter") + } + + var buffer bytes.Buffer + err = jsonFormatter.Write(&buffer, seriesSet) + + return buffer.String(), err +} + +// InitContext runs only once when the function runtime starts +func InitContext(context *nuclio.Context) error { + + // get configuration from env + tsdbTablePath := os.Getenv("QUERY_V3IO_TSDB_PATH") + if tsdbTablePath == "" { + return errors.New("QUERY_V3IO_TSDB_PATH must be set") + } + + context.Logger.InfoWith("Initializing", "tsdbTablePath", tsdbTablePath) + + // create v3io adapter + err := createV3ioQuerier(context, tsdbTablePath) + if err != nil { + return errors.Wrap(err, "Failed to initialize querier") + } + return nil +} + +func createV3ioQuerier(context *nuclio.Context, path string) error { + context.Logger.InfoWith("Creating v3io adapter", "path", path) + + defer tsdbQuerierMtx.Unlock() + tsdbQuerierMtx.Lock() + + if tsdbQuerier == nil { + 
v3ioConfig, err := config.GetOrLoadFromStruct(&config.V3ioConfig{ + TablePath: path, + }) + if err != nil { + return err + } + v3ioUrl := os.Getenv("V3IO_URL") + numWorkersStr := os.Getenv("V3IO_NUM_WORKERS") + var numWorkers int + if len(numWorkersStr) > 0 { + numWorkers, err = strconv.Atoi(numWorkersStr) + if err != nil { + return err + } + } else { + numWorkers = 8 + } + username := os.Getenv("V3IO_USERNAME") + if username == "" { + username = "iguazio" + } + password := os.Getenv("V3IO_PASSWORD") + containerName := os.Getenv("V3IO_CONTAINER") + if containerName == "" { + containerName = "bigdata" + } + container, err := tsdb.NewContainer(v3ioUrl, numWorkers, "", username, password, containerName, context.Logger) + if err != nil { + return err + } + // create adapter once for all contexts + adapter, err := tsdb.NewV3ioAdapter(v3ioConfig, container, context.Logger) + if err != nil { + return err + } + // Create TSDB Querier + tsdbQuerier, err = adapter.QuerierV2() + if err != nil { + return errors.Wrap(err, "Failed to initialize querier") + } + } + return nil +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/examples/v3io-tsdb-config.yaml.template b/functions/query/vendor/github.com/v3io/v3io-tsdb/examples/v3io-tsdb-config.yaml.template new file mode 100644 index 00000000..e5db2b3b --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/examples/v3io-tsdb-config.yaml.template @@ -0,0 +1,24 @@ +# File: v3io-tsdb-config.yaml +# Description: Template of a V3IO TSDB Configuration File + +# TODO: In your configuration file, delete the configuration keys that you +# don't need and replace the "<...>" placeholders. + +# Endpoint of an Iguazio Data Science Platform web-gateway (web-API) service, +# consisting of an IP address or resolvable host domain name, and a port number +# (currently, always port 8081) +# Example: "192.168.1.100:8081" +webApiEndpoint: ":8081" + +# Name of an Iguazio Data Science Platform container for storing the TSDB table +# Example: "bigdata" +container: "" + +# Log level +# Valid values: "debug" | "info" | "warn" | "error" +logLevel: "warn" + +# Authentication credentials for the web-API service +username: "" +password: "" + diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/go.mod b/functions/query/vendor/github.com/v3io/v3io-tsdb/go.mod new file mode 100644 index 00000000..3f5c1b4a --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/go.mod @@ -0,0 +1,27 @@ +module github.com/v3io/v3io-tsdb + +go 1.14 + +require ( + github.com/cespare/xxhash v1.1.0 + github.com/ghodss/yaml v1.0.0 + github.com/imdario/mergo v0.3.7 + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/nuclio/logger v0.0.1 + github.com/nuclio/nuclio-sdk-go v0.0.0-20190205170814-3b507fbd0324 + github.com/nuclio/zap v0.0.2 + github.com/pkg/errors v0.8.1 + github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a + github.com/spf13/cobra v0.0.3 + github.com/stretchr/testify v1.4.0 + github.com/v3io/frames v0.7.10 + github.com/v3io/v3io-go v0.1.5-0.20200416113214-f1b82b9a8e82 + github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2 +) + +replace ( + github.com/v3io/v3io-tsdb => ./ + github.com/xwb1989/sqlparser => github.com/v3io/sqlparser v0.0.0-20190306105200-4d7273501871 + labix.org/v2/mgo => github.com/go-mgo/mgo v0.0.0-20180705113738-7446a0344b7872c067b3d6e1b7642571eafbae17 + launchpad.net/gocheck => github.com/go-check/check v0.0.0-20180628173108-788fd78401277ebd861206a03c884797c6ec5541 +) diff --git 
a/functions/query/vendor/github.com/v3io/v3io-tsdb/go.sum b/functions/query/vendor/github.com/v3io/v3io-tsdb/go.sum new file mode 100644 index 00000000..6f9239cc --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/go.sum @@ -0,0 +1,122 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE= +github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.4.0 h1:8nsMz3tWa9SWWPL60G1V6CUsf4lLjWLTNEtibhe8gh8= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e h1:+lIPJOWl+jSiJOc70QXJ07+2eg2Jy2EC7Mi11BWujeM= +github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= +github.com/mattn/go-colorable 
v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/nuclio/errors v0.0.1 h1:JoADBDnhRKjW05Npu5CLS27Peo7gx+QZcNrLwINV6UY= +github.com/nuclio/errors v0.0.1/go.mod h1:it2rUqDarIL8PasLYZo0Q1Ebsx4NRPM+OyYYakgNyrQ= +github.com/nuclio/logger v0.0.0-20190303161055-fc1e4b16d127/go.mod h1:ttazNAqTxKjQ7XrGDZxecumGa9KCIuJh88gzFY1mRXo= +github.com/nuclio/logger v0.0.1 h1:e+vT/Ug65RC+u0QX2J+lq3P57ZBwJ1ZA6Q2LCEcViwE= +github.com/nuclio/logger v0.0.1/go.mod h1:ttazNAqTxKjQ7XrGDZxecumGa9KCIuJh88gzFY1mRXo= +github.com/nuclio/nuclio-sdk-go v0.0.0-20190205170814-3b507fbd0324 h1:wSCJEH8mUQ3VTyUukbYdxmi0UMmB14Lu1GOlNOs0dWY= +github.com/nuclio/nuclio-sdk-go v0.0.0-20190205170814-3b507fbd0324/go.mod h1:NqMgotiF6Y0Ho4+i5AvJhH3FRKAyL4IMaMv/eoUOkKQ= +github.com/nuclio/zap v0.0.2 h1:rY5PkMOl8CTkqRqIPuxziBiKK6Mq/8oEurfgRnNtqf0= +github.com/nuclio/zap v0.0.2/go.mod h1:SUxPsgePvlyjx6c5MtGdB50pf0IQThtlyLwISLboeuc= +github.com/pavius/impi v0.0.3/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= +github.com/pavius/zap v1.4.2-0.20180228181622-8d52692529b8 h1:WqLgmr/wj9TO5Sc6oYPQRAJBxuHE0NTeuVeFnT+FZVo= +github.com/pavius/zap v1.4.2-0.20180228181622-8d52692529b8/go.mod h1:6FWOCx06uh50GClv8S2cfk3asqTJs3qq3ZNRtLZE77I= +github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= +github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rs/xid v1.1.0/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= 
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/tinylib/msgp v1.1.1 h1:TnCZ3FIuKeaIy+F45+Cnp+caqdXGy4z74HvwXN+570Y= +github.com/tinylib/msgp v1.1.1/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/v3io/frames v0.7.10 h1:u5hZNOgrtuuZDqOXHgvwWNnNvGCOkByP+7c9peDZY2w= +github.com/v3io/frames v0.7.10/go.mod h1:33CcutEG8loyOg7NWpOLujqg0EN7Ofjojk7Uh5uqFHQ= +github.com/v3io/sqlparser v0.0.0-20190306105200-4d7273501871 h1:myF4tU/HdFWU1UzMdf16cHRbownzsyvL7VKIHqkrSvo= +github.com/v3io/sqlparser v0.0.0-20190306105200-4d7273501871/go.mod h1:QD2Bo64oyTWzeV8RFehXS0hZEDFgOK99/h2a6ErRu6E= +github.com/v3io/v3io-go v0.1.5-0.20200416113214-f1b82b9a8e82 h1:4LEQnRvqUtAk++AOKlrIUa13KJmmc7i4dy+gFej4vQk= +github.com/v3io/v3io-go v0.1.5-0.20200416113214-f1b82b9a8e82/go.mod h1:D0W1tjsVgcp4xk3ZI2fjKTKaOpYJLewN1BPN0x2osO4= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.2.0 h1:dzZJf2IuMiclVjdw0kkT+f9u4YdrapbNyGAN47E/qnk= +github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 
+google.golang.org/genproto v0.0.0-20181026194446-8b5d7a19e2d9 h1:26lptpu+T60F849wXfTQMz9ecFf6nTQM0J1JjLSga5U= +google.golang.org/genproto v0.0.0-20181026194446-8b5d7a19e2d9/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.17.0 h1:TRJYBgMclJvGYn2rIMjj+h9KtMt5r1Ij7ODVRIZkwhk= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +zombiezen.com/go/capnproto2 v2.17.0+incompatible h1:sIoKPFGNlM38Qh+PBLa9Wzg1j99oInS/Qlk+5N/CHa4= +zombiezen.com/go/capnproto2 v2.17.0+incompatible/go.mod h1:XO5Pr2SbXgqZwn0m0Ru54QBqpOf4K5AYBO+8LAOBQEQ= diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/internal/pkg/performance/metrics.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/internal/pkg/performance/metrics.go new file mode 100644 index 00000000..8f2c5f37 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/internal/pkg/performance/metrics.go @@ -0,0 +1,196 @@ +package performance + +import ( + "fmt" + "io" + "log" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/pkg/errors" + "github.com/rcrowley/go-metrics" + "github.com/v3io/v3io-tsdb/pkg/config" +) + +const ( + reservoirSize = 100 +) + +var instance *MetricReporter +var once sync.Once + +const ( + STDOUT = "stdout" + STDERR = "stderr" +) + +type MetricReporter struct { + lock sync.Mutex + running bool + registry metrics.Registry + logWriter io.Writer + reportPeriodically bool + reportIntervalSeconds int + reportOnShutdown bool +} + +func DefaultReporterInstance() (reporter *MetricReporter, err error) { + cfg, err := config.GetOrDefaultConfig() + + if err != nil { + // DO NOT return the error to prevent failures of unit tests + fmt.Fprintf(os.Stderr, "unable to load configuration. 
Reason: %v\n"+ + "Will use default reporter configuration instead.", err) + reporter = ReporterInstance(STDOUT, true, 60, true) + } else { + reporter = ReporterInstanceFromConfig(cfg) + } + + return reporter, nil +} + +func ReporterInstance(writeTo string, reportPeriodically bool, reportIntervalSeconds int, reportOnShutdown bool) *MetricReporter { + once.Do(func() { + var writer io.Writer + switch writeTo { + case STDOUT: + writer = os.Stdout + case STDERR: + writer = os.Stderr + default: + writer = os.Stdout + } + + instance = newMetricReporter(writer, reportPeriodically, reportIntervalSeconds, reportOnShutdown) + }) + return instance +} + +func ReporterInstanceFromConfig(config *config.V3ioConfig) *MetricReporter { + return ReporterInstance( + config.MetricsReporter.Output, + config.MetricsReporter.ReportPeriodically, + config.MetricsReporter.RepotInterval, + config.MetricsReporter.ReportOnShutdown) +} + +func (mr *MetricReporter) Start() error { + mr.lock.Lock() + defer mr.lock.Unlock() + + if mr.isEnabled() && !mr.running { + mr.running = true + } else { + return errors.Errorf("metric reporter is already running.") + } + + return nil +} + +func (mr *MetricReporter) Stop() error { + mr.lock.Lock() + defer mr.lock.Unlock() + + if mr.running { + mr.running = false + if mr.reportOnShutdown { + time.Sleep(300 * time.Millisecond) // postpone performance report on shutdown to avoid mixing with other log messages + metrics.WriteOnce(mr.registry, mr.logWriter) + } + mr.registry.UnregisterAll() + } else { + return errors.Errorf("can't stop metric reporter since it's not running.") + } + + return nil +} + +func (mr *MetricReporter) WithTimer(name string, body func()) { + if mr.isRunning() { + timer := metrics.GetOrRegisterTimer(name, mr.registry) + timer.Time(body) + } else { + body() + } +} + +func (mr *MetricReporter) IncrementCounter(name string, count int64) { + if mr.isRunning() { + counter := metrics.GetOrRegisterCounter(name, mr.registry) + counter.Inc(count) + } +} + +func (mr *MetricReporter) UpdateMeter(name string, count int64) { + if mr.isRunning() { + meter := metrics.GetOrRegisterMeter(name, mr.registry) + meter.Mark(count) + } +} + +func (mr *MetricReporter) UpdateHistogram(name string, value int64) { + if mr.isRunning() { + histogram := metrics.GetOrRegisterHistogram(name, mr.registry, metrics.NewUniformSample(reservoirSize)) + histogram.Update(value) + } +} + +// Listen to the SIGINT and SIGTERM +// SIGINT will listen to CTRL-C. +// SIGTERM will be caught if kill command executed. 
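+// Note: signal.Notify (below) replaces the default handling of SIGINT/SIGTERM for the whole process;
+// the goroutine only writes a final metrics snapshot and does not exit or re-raise the signal, so the
+// hosting application remains responsible for actually terminating.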
+func (mr *MetricReporter) registerShutdownHook() { + var gracefulStop = make(chan os.Signal) + // Register for specific signals + signal.Notify(gracefulStop, syscall.SIGINT, syscall.SIGTERM) + + go func() { + sig := <-gracefulStop + _, err := mr.logWriter.Write([]byte(fmt.Sprintf("\n**************************\ncaught sig: %+v\n**************************\n", sig))) + if err == nil { + metrics.WriteOnce(mr.registry, mr.logWriter) + } + }() +} + +func newMetricReporter(outputWriter io.Writer, reportPeriodically bool, reportIntervalSeconds int, reportOnShutdown bool) *MetricReporter { + var writer io.Writer + + if outputWriter != nil { + writer = outputWriter + } else { + writer = os.Stderr + } + + reporter := MetricReporter{ + registry: metrics.NewPrefixedRegistry("v3io-tsdb -> "), + logWriter: writer, + running: true, + reportPeriodically: reportPeriodically, + reportIntervalSeconds: reportIntervalSeconds, + reportOnShutdown: reportOnShutdown, + } + + if reportPeriodically && reportIntervalSeconds > 0 { + // Log periodically + go metrics.Log(reporter.registry, + time.Duration(reportIntervalSeconds)*time.Second, + log.New(reporter.logWriter, "metrics: ", log.Lmicroseconds)) + } + + if reportOnShutdown { + reporter.registerShutdownHook() + } + + return &reporter +} + +func (mr *MetricReporter) isEnabled() bool { + return mr.reportOnShutdown || mr.reportPeriodically +} + +func (mr *MetricReporter) isRunning() bool { + return false +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregate.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregate.go new file mode 100644 index 00000000..64d85d16 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregate.go @@ -0,0 +1,419 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. 
+*/ + +package aggregate + +import ( + "fmt" + "math" + "strings" + + "github.com/v3io/v3io-tsdb/pkg/config" +) + +type AggrType uint16 + +// Aggregation functions +const ( + AggregateLabel = "Aggregate" + CrossSeriesSuffix = "_all" + + aggrTypeNone AggrType = 0 + aggrTypeCount AggrType = 1 + aggrTypeSum AggrType = 2 + aggrTypeSqr AggrType = 4 + aggrTypeMax AggrType = 8 + aggrTypeMin AggrType = 16 + aggrTypeLast AggrType = 32 + + // Derived aggregates + aggrTypeAvg AggrType = aggrTypeCount | aggrTypeSum + aggrTypeRate AggrType = aggrTypeLast | 0x8000 + aggrTypeStddev AggrType = aggrTypeCount | aggrTypeSum | aggrTypeSqr + aggrTypeStdvar AggrType = aggrTypeCount | aggrTypeSum | aggrTypeSqr | 0x8000 + aggrTypeAll AggrType = 0xffff +) + +var rawAggregates = []AggrType{aggrTypeCount, aggrTypeSum, aggrTypeSqr, aggrTypeMax, aggrTypeMin, aggrTypeLast} +var rawAggregatesMask = aggrTypeCount | aggrTypeSum | aggrTypeSqr | aggrTypeMax | aggrTypeMin | aggrTypeLast + +var aggrTypeString = map[string]AggrType{ + "count": aggrTypeCount, "sum": aggrTypeSum, "sqr": aggrTypeSqr, "max": aggrTypeMax, "min": aggrTypeMin, + "last": aggrTypeLast, "avg": aggrTypeAvg, "rate": aggrTypeRate, + "stddev": aggrTypeStddev, "stdvar": aggrTypeStdvar, "*": aggrTypeAll} + +var aggrToString = map[AggrType]string{ + aggrTypeCount: "count", aggrTypeSum: "sum", aggrTypeSqr: "sqr", aggrTypeMin: "min", aggrTypeMax: "max", + aggrTypeLast: "last", aggrTypeAvg: "avg", aggrTypeRate: "rate", + aggrTypeStddev: "stddev", aggrTypeStdvar: "stdvar", aggrTypeAll: "*", +} + +var aggrToSchemaField = map[string]config.SchemaField{ + "count": {Name: "count", Type: "array", Nullable: true, Items: "double"}, + "sum": {Name: "sum", Type: "array", Nullable: true, Items: "double"}, + "sqr": {Name: "sqr", Type: "array", Nullable: true, Items: "double"}, + "max": {Name: "max", Type: "array", Nullable: true, Items: "double"}, + "min": {Name: "min", Type: "array", Nullable: true, Items: "double"}, + "last": {Name: "last", Type: "array", Nullable: true, Items: "double"}, + "avg": {Name: "avg", Type: "array", Nullable: true, Items: "double"}, + "rate": {Name: "rate", Type: "array", Nullable: true, Items: "double"}, + "stddev": {Name: "stddev", Type: "array", Nullable: true, Items: "double"}, + "stdvar": {Name: "stdvar", Type: "array", Nullable: true, Items: "double"}, +} + +func (a AggrType) HasAverage() bool { + return (a & aggrTypeAvg) == aggrTypeAvg +} + +func SchemaFieldFromString(aggregates []string, col string) ([]config.SchemaField, error) { + fieldList := make([]config.SchemaField, 0, len(aggregates)) + for _, s := range aggregates { + trimmed := strings.TrimSpace(s) + if trimmed != "" { + if trimmed == "*" { + fieldList = make([]config.SchemaField, 0, len(aggrToSchemaField)) + for _, val := range aggrToSchemaField { + fieldList = append(fieldList, getAggrFullName(val, col)) + } + } else { + field, ok := aggrToSchemaField[trimmed] + if !ok { + return nil, fmt.Errorf("invalid aggragator type '%s'", trimmed) + } + fieldList = append(fieldList, getAggrFullName(field, col)) + } + } + } + return fieldList, nil +} + +func getAggrFullName(field config.SchemaField, col string) config.SchemaField { + fullName := fmt.Sprintf("_%s_%s", col, field.Name) + field.Name = fullName + return field +} + +func (a AggrType) String() string { return aggrToString[a] } + +func RawAggregatesToStringList(aggregates string) ([]string, error) { + aggrs := strings.Split(aggregates, ",") + aggType, _, err := AggregatesFromStringListWithCount(aggrs) + if err != nil { + return 
nil, err + } + var list []string + for _, aggr := range rawAggregates { + if aggr&aggType != 0 { + list = append(list, aggrToString[aggr]) + } + } + + return list, nil +} + +func ParseCrossLabelSets(str string) [][]string { + var res [][]string + labelSetStrings := strings.Split(str, ";") + for _, labelSetString := range labelSetStrings { + labelSet := strings.Split(strings.TrimSpace(labelSetString), ",") + var trimmedLabelSet []string + for _, label := range labelSet { + trimmedLabel := strings.TrimSpace(label) + if trimmedLabel != "" { + trimmedLabelSet = append(trimmedLabelSet, trimmedLabel) + } + } + if len(trimmedLabelSet) > 0 { + res = append(res, trimmedLabelSet) + } + } + return res +} + +// Convert a comma-separated aggregation-functions string to an aggregates mask +func AggregatesFromStringListWithCount(split []string) (AggrType, []AggrType, error) { + var aggrMask AggrType + var aggrList []AggrType + + var hasAggregates bool + for _, s := range split { + aggr, err := FromString(s) + if err != nil { + return 0, nil, err + } + if aggr != 0 { + hasAggregates = true + aggrMask = aggrMask | aggr + aggrList = append(aggrList, aggr) + } + } + // Always have count aggregate by default + if hasAggregates { + aggrMask = aggrMask | aggrTypeCount + aggrList = append(aggrList, aggrTypeCount) + } + return aggrMask, aggrList, nil +} + +func FromString(aggrString string) (AggrType, error) { + trimmed := strings.TrimSpace(aggrString) + if trimmed == "" { + return 0, nil + } + aggr, ok := aggrTypeString[trimmed] + if !ok { + return 0, fmt.Errorf("invalid aggragate type: %v", trimmed) + } + return aggr, nil +} + +// Create a list of aggregate objects from an aggregates mask +func NewAggregatesList(aggrType AggrType) *AggregatesList { + list := AggregatesList{} + if (aggrType & aggrTypeCount) != 0 { + list = append(list, &CountAggregate{}) + } + if (aggrType & aggrTypeSum) != 0 { + list = append(list, &SumAggregate{FloatAggregate{attr: "sum"}}) + } + if (aggrType & aggrTypeSqr) != 0 { + list = append(list, &SqrAggregate{FloatAggregate{attr: "sqr"}}) + } + if (aggrType & aggrTypeMin) != 0 { + list = append(list, &MinAggregate{FloatAggregate{attr: "min", val: math.Inf(1)}}) + } + if (aggrType & aggrTypeMax) != 0 { + list = append(list, &MaxAggregate{FloatAggregate{attr: "max", val: math.Inf(-1)}}) + } + if (aggrType & aggrTypeLast) != 0 { + list = append(list, &LastAggregate{FloatAggregate{attr: "last", val: math.Inf(-1)}, 0}) + } + return &list +} + +// List of aggregates +type AggregatesList []Aggregate + +// Append a value to all aggregates +func (a AggregatesList) Aggregate(t int64, val interface{}) { + v, ok := val.(float64) + if !ok { + return + } + for _, aggr := range a { + aggr.Aggregate(t, v) + } +} + +// Return an update expression for the aggregates in the given aggregates list +func (a AggregatesList) UpdateExpr(col string, bucket int) string { + expr := "" + for _, aggr := range a { + expr = expr + aggr.UpdateExpr(col, bucket) + } + return expr +} + +// Return an aggregates set expression (first value) or update expression +func (a AggregatesList) SetOrUpdateExpr(col string, bucket int, isNew bool) string { + if isNew { + return a.SetExpr(col, bucket) + } + return a.UpdateExpr(col, bucket) +} + +func (a AggregatesList) SetExpr(col string, bucket int) string { + expr := "" + for _, aggr := range a { + expr = expr + aggr.SetExpr(col, bucket) + } + return expr +} + +// Return an aggregates array-initialization expression +func (a AggregatesList) InitExpr(col string, buckets int) string { 
+ expr := "" + for _, aggr := range a { + expr = expr + aggr.InitExpr(col, buckets) + } + return expr +} + +// Clear all aggregates +func (a AggregatesList) Clear() { + for _, aggr := range a { + aggr.Clear() + } +} + +func GetHiddenAggregates(mask AggrType, requestedAggregates []AggrType) []AggrType { + var hiddenAggregates []AggrType + + for _, aggr := range rawAggregates { + if aggr&mask == aggr && !ContainsAggregate(requestedAggregates, aggr) { + hiddenAggregates = append(hiddenAggregates, aggr) + } + } + return hiddenAggregates +} + +func GetHiddenAggregatesWithCount(mask AggrType, requestedAggregates []AggrType) []AggrType { + mask |= aggrTypeCount + return GetHiddenAggregates(mask, requestedAggregates) +} + +func ContainsAggregate(list []AggrType, item AggrType) bool { + for _, v := range list { + if v == item { + return true + } + } + return false +} + +func IsRawAggregate(item AggrType) bool { return ContainsAggregate(rawAggregates, item) } + +func IsCountAggregate(aggr AggrType) bool { return aggr == aggrTypeCount } + +func HasAggregates(mask AggrType) bool { return mask != aggrTypeNone } + +func MaskToString(mask AggrType) string { + var output strings.Builder + aggCount := 0 + for _, raw := range rawAggregates { + if mask&raw == raw { + if aggCount != 0 { + output.WriteString(",") + } + output.WriteString(aggrToString[raw]) + aggCount++ + } + } + + return output.String() +} + +func ToAttrName(aggr AggrType) string { + return config.AggregateAttrPrefix + aggr.String() +} + +func GetServerAggregationsFunction(aggr AggrType) (func(interface{}, interface{}) interface{}, error) { + switch aggr { + case aggrTypeCount: + return func(old, next interface{}) interface{} { + if old == nil { + return next + } + return old.(float64) + next.(float64) + }, nil + case aggrTypeSum: + return func(old, next interface{}) interface{} { + if old == nil { + return next + } + return old.(float64) + next.(float64) + }, nil + case aggrTypeSqr: + return func(old, next interface{}) interface{} { + if old == nil { + return next + } + return old.(float64) + next.(float64) + }, nil + case aggrTypeMin: + return func(old, next interface{}) interface{} { + if old == nil { + return next + } + return math.Min(old.(float64), next.(float64)) + }, nil + case aggrTypeMax: + return func(old, next interface{}) interface{} { + if old == nil { + return next + } + return math.Max(old.(float64), next.(float64)) + }, nil + case aggrTypeLast: + return func(_, next interface{}) interface{} { + return next + }, nil + default: + return nil, fmt.Errorf("unsupported server side aggregate %v", aggrToString[aggr]) + } +} + +func GetServerVirtualAggregationFunction(aggr AggrType) (func([]float64) float64, error) { + switch aggr { + case aggrTypeAvg: + return func(data []float64) float64 { + count := data[0] + sum := data[1] + return sum / count + }, nil + case aggrTypeStddev: + return func(data []float64) float64 { + count := data[0] + sum := data[1] + sqr := data[2] + return math.Sqrt((count*sqr - sum*sum) / (count * (count - 1))) + }, nil + case aggrTypeStdvar: + return func(data []float64) float64 { + count := data[0] + sum := data[1] + sqr := data[2] + return (count*sqr - sum*sum) / (count * (count - 1)) + }, nil + default: + return nil, fmt.Errorf("cannot aggregate %v", aggrToString[aggr]) + } +} + +func GetClientAggregationsFunction(aggr AggrType) (func(interface{}, interface{}) interface{}, error) { + switch aggr { + case aggrTypeCount: + return func(old, next interface{}) interface{} { + if old == nil { + return 1.0 + } + 
return old.(float64) + 1.0 + }, nil + case aggrTypeSqr: + return func(old, next interface{}) interface{} { + if old == nil { + return next.(float64) * next.(float64) + } + return old.(float64) + next.(float64)*next.(float64) + }, nil + default: + return GetServerAggregationsFunction(aggr) + } +} + +func GetDependantAggregates(aggr AggrType) []AggrType { + var aggregates []AggrType + for _, rawAggr := range rawAggregates { + if aggr&rawAggr == rawAggr { + aggregates = append(aggregates, rawAggr) + } + } + return aggregates +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregate_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregate_test.go new file mode 100644 index 00000000..72d158b1 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregate_test.go @@ -0,0 +1,144 @@ +// +build unit + +package aggregate + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +func TestAggregates(t *testing.T) { + testCases := []struct { + desc string + aggString string + data map[int64]float64 + exprCol string + bucket int + expectedUpdateExpr string + expectedSetExpr string + expectFail bool + ignoreReason string + }{ + {desc: "Should aggregate data with Count aggregate", + aggString: "count", + data: map[int64]float64{1: 7.5, 2: 2.5}, + exprCol: "v", bucket: 1, + expectedUpdateExpr: "_v_count[1]=_v_count[1]+2;", expectedSetExpr: "_v_count[1]=2;"}, + + {desc: "Should aggregate data with Sum aggregate", + aggString: "sum", + data: map[int64]float64{1: 7.5, 2: 2.5}, + exprCol: "v", bucket: 1, + expectedUpdateExpr: fmt.Sprintf("_v_sum[1]=_v_sum[1]+%s;_v_count[1]=_v_count[1]+2;", utils.FloatToNormalizedScientificStr(10.0)), + expectedSetExpr: fmt.Sprintf("_v_sum[1]=%s;_v_count[1]=2;", utils.FloatToNormalizedScientificStr(10.0))}, + + {desc: "Should aggregate data with Sqr aggregate", + aggString: "sqr", + data: map[int64]float64{1: 2.0}, + exprCol: "v", bucket: 1, + expectedUpdateExpr: fmt.Sprintf("_v_sqr[1]=_v_sqr[1]+%s;_v_count[1]=_v_count[1]+1;", utils.FloatToNormalizedScientificStr(4.0)), + expectedSetExpr: fmt.Sprintf("_v_sqr[1]=%s;_v_count[1]=1;", utils.FloatToNormalizedScientificStr(4.0))}, + + {desc: "Should aggregate data with Min & Max aggregates", + aggString: "min,max", + data: map[int64]float64{1: 7.5, 2: 2.5}, + exprCol: "v", bucket: 1, + expectedUpdateExpr: fmt.Sprintf("_v_min[1]=min(_v_min[1],%s);_v_max[1]=max(_v_max[1],%s);_v_count[1]=_v_count[1]+2;", + utils.FloatToNormalizedScientificStr(2.5), utils.FloatToNormalizedScientificStr(7.5)), + expectedSetExpr: fmt.Sprintf("_v_min[1]=%s;_v_max[1]=%s;_v_count[1]=2;", + utils.FloatToNormalizedScientificStr(2.5), + utils.FloatToNormalizedScientificStr(7.5))}, + + {desc: "Should aggregate data with Count,Sum,Sqr,Last aggregates", + aggString: "count,sum,sqr,last", + data: map[int64]float64{1: 7.5, 2: 2.5}, + exprCol: "v", bucket: 1, + expectedUpdateExpr: fmt.Sprintf("_v_count[1]=_v_count[1]+2;_v_sum[1]=_v_sum[1]+%s;_v_sqr[1]=_v_sqr[1]+%s;_v_last[1]=%s;", + utils.FloatToNormalizedScientificStr(10.0), utils.FloatToNormalizedScientificStr(62.5), + utils.FloatToNormalizedScientificStr(2.5)), + expectedSetExpr: fmt.Sprintf("_v_count[1]=2;_v_sum[1]=%s;_v_sqr[1]=%s;_v_last[1]=%s;", + utils.FloatToNormalizedScientificStr(10.0), + utils.FloatToNormalizedScientificStr(62.5), utils.FloatToNormalizedScientificStr(2.5))}, + + {desc: "Should aggregate data with Wildcard aggregates", + aggString: 
"*", + data: map[int64]float64{1: 7.5, 2: 2.5}, + exprCol: "v", bucket: 1, + expectedUpdateExpr: fmt.Sprintf("_v_count[1]=_v_count[1]+2;_v_sum[1]=_v_sum[1]+%s;"+ + "_v_sqr[1]=_v_sqr[1]+%s;_v_min[1]=min(_v_min[1],%s);_v_max[1]=max(_v_max[1],%s);"+ + "_v_last[1]=%s;", utils.FloatToNormalizedScientificStr(10.0), + utils.FloatToNormalizedScientificStr(62.5), + utils.FloatToNormalizedScientificStr(2.5), utils.FloatToNormalizedScientificStr(7.5), + utils.FloatToNormalizedScientificStr(2.5)), + expectedSetExpr: fmt.Sprintf("_v_count[1]=2;_v_sum[1]=%s;_v_sqr[1]=%s;"+ + "_v_min[1]=%s;_v_max[1]=%s;_v_last[1]=%s;", + utils.FloatToNormalizedScientificStr(10.0), utils.FloatToNormalizedScientificStr(62.5), + utils.FloatToNormalizedScientificStr(2.5), utils.FloatToNormalizedScientificStr(7.5), + utils.FloatToNormalizedScientificStr(2.5))}, + + {desc: "Should aggregate data with Bad aggregate", + aggString: "not-real", + data: map[int64]float64{1: 7.5, 2: 2.5}, + exprCol: "v", bucket: 1, + expectedUpdateExpr: "_v_count[1]=_v_count[1]+2;", expectedSetExpr: "_v_count[1]=2;", expectFail: true}, + + {desc: "Should aggregate data when specifying aggregates with sapces", + aggString: "min , max ", + data: map[int64]float64{1: 7.5, 2: 2.5}, + exprCol: "v", bucket: 1, + expectedUpdateExpr: fmt.Sprintf("_v_min[1]=min(_v_min[1],%s);_v_max[1]=max(_v_max[1],%s);_v_count[1]=_v_count[1]+2;", + utils.FloatToNormalizedScientificStr(2.5), utils.FloatToNormalizedScientificStr(7.5)), + expectedSetExpr: fmt.Sprintf("_v_min[1]=%s;_v_max[1]=%s;_v_count[1]=2;", + utils.FloatToNormalizedScientificStr(2.5), utils.FloatToNormalizedScientificStr(7.5))}, + + {desc: "Should aggregate data when specifying aggregates with empty values", + aggString: "min , ,max ", + data: map[int64]float64{1: 7.5, 2: 2.5}, + exprCol: "v", bucket: 1, + expectedUpdateExpr: fmt.Sprintf("_v_min[1]=min(_v_min[1],%s);_v_max[1]=max(_v_max[1],%s);_v_count[1]=_v_count[1]+2;", + utils.FloatToNormalizedScientificStr(2.5), utils.FloatToNormalizedScientificStr(7.5)), + expectedSetExpr: fmt.Sprintf("_v_min[1]=%s;_v_max[1]=%s;_v_count[1]=2;", + utils.FloatToNormalizedScientificStr(2.5), utils.FloatToNormalizedScientificStr(7.5))}, + } + + for _, test := range testCases { + t.Logf("%s\n", test.desc) + t.Run(test.desc, func(t *testing.T) { + if test.ignoreReason != "" { + t.Skip(test.ignoreReason) + } + testAggregateCase(t, test.aggString, test.data, test.exprCol, test.bucket, test.expectedUpdateExpr, + test.expectedSetExpr, test.expectFail) + }) + } +} + +func testAggregateCase(t *testing.T, aggString string, data map[int64]float64, exprCol string, bucket int, + expectedUpdateExpr string, expectedSetExpr string, expectFail bool) { + + aggregates, _, err := AggregatesFromStringListWithCount(strings.Split(aggString, ",")) + if err != nil { + if !expectFail { + t.Fatal(err) + } else { + return + } + } + aggregatesList := NewAggregatesList(aggregates) + + for k, v := range data { + aggregatesList.Aggregate(k, v) + } + + actualUpdateExpr := strings.Split(aggregatesList.UpdateExpr(exprCol, bucket), ";") + expectedUpdateExprSet := strings.Split(expectedUpdateExpr, ";") + assert.ElementsMatch(t, actualUpdateExpr, expectedUpdateExprSet) + + actualSetExpr := strings.Split(aggregatesList.SetExpr(exprCol, bucket), ";") + expectedSetExprSet := strings.Split(expectedSetExpr, ";") + assert.ElementsMatch(t, actualSetExpr, expectedSetExprSet) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregationParams.go 
b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregationParams.go new file mode 100644 index 00000000..94c6f11d --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregationParams.go @@ -0,0 +1,90 @@ +package aggregate + +import ( + "fmt" + "strings" +) + +type AggregationParams struct { + colName string // column name ("v" in timeseries) + aggrMask AggrType // the sum of aggregates (or between all aggregates) + rollupTime int64 // time per bucket (cell in the array) + Interval int64 // requested (query) aggregation step + buckets int // number of buckets in the array + overlapWindows []int // a list of overlapping windows (* interval), e.g. last 1hr, 6hr, 12hr, 24hr + aggregationWindow int64 // a time window on which to calculate the aggregation per Interval + disableClientAggregation bool + useServerAggregateCoefficient int +} + +func NewAggregationParams(functions, col string, + buckets int, + interval, aggregationWindow, rollupTime int64, + windows []int, + disableClientAggregation bool, + useServerAggregateCoefficient int) (*AggregationParams, error) { + + aggregatesList := strings.Split(functions, ",") + aggrMask, _, err := AggregatesFromStringListWithCount(aggregatesList) + if err != nil { + return nil, err + } + + newAggregateSeries := AggregationParams{ + aggrMask: aggrMask, + colName: col, + buckets: buckets, + rollupTime: rollupTime, + aggregationWindow: aggregationWindow, + Interval: interval, + overlapWindows: windows, + disableClientAggregation: disableClientAggregation, + useServerAggregateCoefficient: useServerAggregateCoefficient, + } + + return &newAggregateSeries, nil +} + +func (as *AggregationParams) CanAggregate(partitionAggr AggrType) bool { + // Get only the raw aggregates from what the user requested + aggrMask := rawAggregatesMask & as.aggrMask + // make sure the DB has all the aggregates we need (on bits in the mask) + // and that the requested interval is greater/eq to aggregate resolution and is an even divisor + // if interval and rollup are not even divisors we need higher resolution (3x) to smooth the graph + // when we add linear/spline graph projection we can reduce back to 1x + return ((aggrMask & partitionAggr) == aggrMask) && + (as.Interval/as.rollupTime > int64(as.useServerAggregateCoefficient) || (as.Interval == as.rollupTime && as.disableClientAggregation)) && + (as.aggregationWindow == 0 || as.aggregationWindow >= as.rollupTime) +} + +func (as *AggregationParams) GetAggrMask() AggrType { + return as.aggrMask +} + +func (as *AggregationParams) GetRollupTime() int64 { + return as.rollupTime +} + +func (as *AggregationParams) GetAggregationWindow() int64 { + return as.aggregationWindow +} + +func (as *AggregationParams) HasAggregationWindow() bool { + return as.aggregationWindow > 0 +} + +func (as *AggregationParams) toAttrName(aggr AggrType) string { + return fmt.Sprintf("_%v_%v", as.colName, aggr.String()) +} + +func (as *AggregationParams) GetAttrNames() []string { + var names []string + + for _, aggr := range rawAggregates { + if aggr&as.aggrMask != 0 { + names = append(names, as.toAttrName(aggr)) + } + } + + return names +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/functions.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/functions.go new file mode 100644 index 00000000..7cc2b73d --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/functions.go @@ -0,0 +1,151 @@ +/* +Copyright 2018 Iguazio Systems Ltd. 
+ +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package aggregate + +import ( + "fmt" + "math" + + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type Aggregate interface { + Aggregate(t int64, v float64) + Clear() + GetAttr() string + UpdateExpr(col string, bucket int) string + SetExpr(col string, bucket int) string + InitExpr(col string, buckets int) string +} + +// Count aggregate +type CountAggregate struct { + count int +} + +func (a *CountAggregate) Aggregate(t int64, v float64) { a.count++ } +func (a *CountAggregate) Clear() { a.count = 0 } +func (a *CountAggregate) GetAttr() string { return "count" } + +func (a *CountAggregate) UpdateExpr(col string, bucket int) string { + return fmt.Sprintf("_%s_count[%d]=_%s_count[%d]+%d;", col, bucket, col, bucket, a.count) +} + +func (a *CountAggregate) SetExpr(col string, bucket int) string { + return fmt.Sprintf("_%s_count[%d]=%d;", col, bucket, a.count) +} + +func (a *CountAggregate) InitExpr(col string, buckets int) string { + return fmt.Sprintf("_%s_count=init_array(%d,'int');", col, buckets) +} + +// base float64 Aggregate +type FloatAggregate struct { + val float64 + attr string +} + +func (a *FloatAggregate) Clear() { a.val = 0 } +func (a *FloatAggregate) GetAttr() string { return a.attr } +func (a *FloatAggregate) GetVal() float64 { return a.val } +func (a *FloatAggregate) SetExpr(col string, bucket int) string { + return fmt.Sprintf("_%s_%s[%d]=%s;", col, a.attr, bucket, utils.FloatToNormalizedScientificStr(a.val)) +} + +func (a *FloatAggregate) UpdateExpr(col string, bucket int) string { + return fmt.Sprintf("_%s_%s[%d]=_%s_%s[%d]+%s;", col, a.attr, bucket, col, a.attr, bucket, + utils.FloatToNormalizedScientificStr(a.val)) +} + +func (a *FloatAggregate) InitExpr(col string, buckets int) string { + return fmt.Sprintf("_%s_%s=init_array(%d,'double',%f);", col, a.attr, buckets, a.val) +} + +// Sum Aggregate +type SumAggregate struct{ FloatAggregate } + +func (a *SumAggregate) Aggregate(t int64, v float64) { + if utils.IsDefined(v) { + a.val += v + } +} + +// Power of 2 Aggregate +type SqrAggregate struct{ FloatAggregate } + +func (a *SqrAggregate) Aggregate(t int64, v float64) { + if utils.IsDefined(v) { + a.val += v * v + } +} + +// Minimum Aggregate +type MinAggregate struct{ FloatAggregate } + +func (a *MinAggregate) Clear() { a.val = math.Inf(1) } + +func (a *MinAggregate) Aggregate(t int64, v float64) { + if v < a.val { + a.val = v + } +} +func (a *MinAggregate) UpdateExpr(col string, bucket int) string { + return fmt.Sprintf("_%s_%s[%d]=min(_%s_%s[%d],%s);", col, a.attr, bucket, col, a.attr, bucket, + utils.FloatToNormalizedScientificStr(a.val)) +} + +// Maximum Aggregate +type MaxAggregate struct{ FloatAggregate } + +func (a *MaxAggregate) Clear() { a.val = math.Inf(-1) } + 
+func (a *MaxAggregate) Aggregate(t int64, v float64) { + if v > a.val { + a.val = v + } +} +func (a *MaxAggregate) UpdateExpr(col string, bucket int) string { + return fmt.Sprintf("_%s_%s[%d]=max(_%s_%s[%d],%s);", col, a.attr, bucket, col, a.attr, bucket, + utils.FloatToNormalizedScientificStr(a.val)) +} + +// Last value Aggregate +type LastAggregate struct { + FloatAggregate + lastT int64 +} + +func (a *LastAggregate) Clear() { a.val = math.Inf(-1) } + +func (a *LastAggregate) Aggregate(t int64, v float64) { + if t > a.lastT { + a.val = v + a.lastT = t + } +} + +func (a *LastAggregate) UpdateExpr(col string, bucket int) string { + if utils.IsUndefined(a.val) { + return "" + } + + return fmt.Sprintf("_%s_%s[%d]=%s;", col, a.attr, bucket, utils.FloatToNormalizedScientificStr(a.val)) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/iterator.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/iterator.go new file mode 100644 index 00000000..f8699148 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/iterator.go @@ -0,0 +1,393 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package aggregate + +import ( + "fmt" + "math" + "strings" + + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +// Local cache of init arrays per aggregate type. Used to mimic memcopy and initialize data arrays with specific values +var initDataArrayCache = map[AggrType][]float64{} + +type Series struct { + colName string // column name ("v" in timeseries) + functions []AggrType // list of aggregation functions to return (count, avg, sum, ..) + aggrMask AggrType // the sum of aggregates (or between all aggregates) + rollupTime int64 // time per bucket (cell in the array) + interval int64 // requested (query) aggregation step + buckets int // number of buckets in the array + overlapWindows []int // a list of overlapping windows (* interval), e.g. 
last 1hr, 6hr, 12hr, 24hr +} + +func NewAggregateSeries(functions, col string, buckets int, interval, rollupTime int64, windows []int) (*Series, error) { + + split := strings.Split(functions, ",") + var aggrMask AggrType + var aggrList []AggrType + + for _, s := range split { + aggr, ok := aggrTypeString[s] + if !ok { + return nil, fmt.Errorf("invalid aggregator type %s", s) + } + aggrMask = aggrMask | aggr + aggrList = append(aggrList, aggr) + } + + // Always have count Aggregate by default + if aggrMask != 0 { + aggrMask |= aggrTypeCount + } + + newAggregateSeries := Series{ + aggrMask: aggrMask, + functions: aggrList, + colName: col, + buckets: buckets, + rollupTime: rollupTime, + interval: interval, + overlapWindows: windows, + } + + return &newAggregateSeries, nil +} + +func (as *Series) CanAggregate(partitionAggr AggrType) bool { + // keep only real aggregates + aggrMask := 0x7f & as.aggrMask + // make sure the DB has all the aggregates we need (on bits in the mask) + // and that the requested interval is greater/eq to aggregate resolution and is an even divisor + // if interval and rollup are not even divisors we need higher resolution (3x) to smooth the graph + // when we add linear/spline graph projection we can reduce back to 1x + return ((aggrMask & partitionAggr) == aggrMask) && + as.interval >= as.rollupTime && (as.interval%as.rollupTime == 0 || as.interval/as.rollupTime > 3) +} + +func (as *Series) GetAggrMask() AggrType { + return as.aggrMask +} + +func (as *Series) GetFunctions() []AggrType { + return as.functions +} + +func (as *Series) NumFunctions() int { + return len(as.functions) +} + +func (as *Series) toAttrName(aggr AggrType) string { + return "_" + as.colName + "_" + aggr.String() +} + +func (as *Series) GetAttrNames() []string { + var names []string + + for _, aggr := range rawAggregates { + if aggr&as.aggrMask != 0 { + names = append(names, as.toAttrName(aggr)) + } + } + + return names +} + +// create new aggregation set from v3io aggregation array attributes +func (as *Series) NewSetFromAttrs( + length, start, end int, mint, maxt int64, attrs *map[string]interface{}) (*Set, error) { + + aggrArrays := map[AggrType][]uint64{} + dataArrays := map[AggrType][]float64{} + + var maxAligned int64 + if as.overlapWindows != nil { + length = len(as.overlapWindows) + maxAligned = (maxt / as.interval) * as.interval + } + + for _, aggr := range rawAggregates { + if aggr&as.aggrMask != 0 { + attrBlob, ok := (*attrs)[as.toAttrName(aggr)] + if !ok { + return nil, fmt.Errorf("aggregation attribute %s was not found", as.toAttrName(aggr)) + } + aggrArrays[aggr] = utils.AsInt64Array(attrBlob.([]byte)) + + dataArrays[aggr] = make([]float64, length, length) + copy(dataArrays[aggr], getOrCreateInitDataArray(aggr, length)) + } + } + + aggrSet := Set{length: length, interval: as.interval, overlapWin: as.overlapWindows} + aggrSet.dataArrays = dataArrays + + arrayIndex := start + i := 0 + + for arrayIndex != end { + + if as.overlapWindows == nil { + + // standard aggregates (evenly spaced intervals) + cellIndex := int((int64(i) * as.rollupTime) / as.interval) + for aggr, array := range aggrArrays { + aggrSet.mergeArrayCell(aggr, cellIndex, array[arrayIndex]) + } + } else { + + // overlapping time windows (last 1hr, 6hr, ..)
+ t := mint + (int64(i) * as.rollupTime) + if t < maxAligned { + for i, win := range as.overlapWindows { + if t > maxAligned-int64(win)*as.interval { + for aggr, array := range aggrArrays { + aggrSet.mergeArrayCell(aggr, i, array[arrayIndex]) + } + } + } + } + + } + + i++ + arrayIndex = (arrayIndex + 1) % (as.buckets + 1) + } + + return &aggrSet, nil +} + +// prepare new aggregation set from v3io raw chunk attributes (in case there are no aggregation arrays) +func (as *Series) NewSetFromChunks(length int) *Set { + + if as.overlapWindows != nil { + length = len(as.overlapWindows) + } + + newAggregateSet := Set{length: length, interval: as.interval, overlapWin: as.overlapWindows} + dataArrays := map[AggrType][]float64{} + + for _, aggr := range rawAggregates { + if aggr&as.aggrMask != 0 { + dataArrays[aggr] = make([]float64, length, length) // TODO: len/capacity & reuse (pool) + initArray := getOrCreateInitDataArray(aggr, length) + copy(dataArrays[aggr], initArray) + } + } + + newAggregateSet.dataArrays = dataArrays + return &newAggregateSet +} + +type Set struct { + dataArrays map[AggrType][]float64 + length int + maxCell int + baseTime int64 + interval int64 + overlapWin []int +} + +func (as *Set) GetMaxCell() int { + return as.maxCell +} + +// append the value to a cell in all relevant aggregation arrays +func (as *Set) AppendAllCells(cell int, val float64) { + + if !isValidCell(cell, as) { + return + } + + if cell > as.maxCell { + as.maxCell = cell + } + + for aggr := range as.dataArrays { + as.updateCell(aggr, cell, val) + } +} + +// append/merge server aggregation values into aggregation per requested interval/step +// if the requested step interval is higher than stored interval we need to collapse multiple cells to one +func (as *Set) mergeArrayCell(aggr AggrType, cell int, val uint64) { + + if cell >= as.length { + return + } + + if cell > as.maxCell { + as.maxCell = cell + } + + if aggr == aggrTypeCount { + as.dataArrays[aggr][cell] += float64(val) + } else { + float := math.Float64frombits(val) + // When getting already aggregated sqr aggregate we just need to sum. 
+ if aggr == aggrTypeSqr { + as.dataArrays[aggr][cell] += float + } else { + as.updateCell(aggr, cell, float) + } + } +} + +func isValidCell(cellIndex int, aSet *Set) bool { + return cellIndex >= 0 && + cellIndex < aSet.length +} + +// function specific aggregation +func (as *Set) updateCell(aggr AggrType, cell int, val float64) { + + if !isValidCell(cell, as) { + return + } + + cellValue := as.dataArrays[aggr][cell] + switch aggr { + case aggrTypeCount: + as.dataArrays[aggr][cell]++ + case aggrTypeSum: + as.dataArrays[aggr][cell] += val + case aggrTypeSqr: + as.dataArrays[aggr][cell] += val * val + case aggrTypeMin: + if val < cellValue { + as.dataArrays[aggr][cell] = val + } + case aggrTypeMax: + if val > cellValue { + as.dataArrays[aggr][cell] = val + } + case aggrTypeLast: + as.dataArrays[aggr][cell] = val + } +} + +// return the value per aggregate or complex function +func (as *Set) GetCellValue(aggr AggrType, cell int) (float64, bool) { + + if !isValidCell(cell, as) { + return math.NaN(), false + } + + dependsOnSum := aggr == aggrTypeStddev || aggr == aggrTypeStdvar || aggr == aggrTypeAvg + dependsOnSqr := aggr == aggrTypeStddev || aggr == aggrTypeStdvar + dependsOnLast := aggr == aggrTypeLast || aggr == aggrTypeRate + + // return an undefined result if one of the dependent fields is missing + if (dependsOnSum && utils.IsUndefined(as.dataArrays[aggrTypeSum][cell])) || + (dependsOnSqr && utils.IsUndefined(as.dataArrays[aggrTypeSqr][cell]) || + (dependsOnLast && utils.IsUndefined(as.dataArrays[aggrTypeLast][cell]))) { + return math.NaN(), false + } + + // if no samples in this bucket the result is undefined + var cnt float64 + if dependsOnSum { + cnt = as.dataArrays[aggrTypeCount][cell] + if cnt == 0 { + return math.NaN(), false + } + } + + switch aggr { + case aggrTypeAvg: + return as.dataArrays[aggrTypeSum][cell] / cnt, true + case aggrTypeStddev: + sum := as.dataArrays[aggrTypeSum][cell] + sqr := as.dataArrays[aggrTypeSqr][cell] + return math.Sqrt((cnt*sqr - sum*sum) / (cnt * (cnt - 1))), true + case aggrTypeStdvar: + sum := as.dataArrays[aggrTypeSum][cell] + sqr := as.dataArrays[aggrTypeSqr][cell] + return (cnt*sqr - sum*sum) / (cnt * (cnt - 1)), true + case aggrTypeRate: + if cell == 0 { + return math.NaN(), false + } + // TODO: need to clarify the meaning of this type of aggregation. IMHO, rate has meaning for monotonic counters only + last := as.dataArrays[aggrTypeLast][cell-1] + this := as.dataArrays[aggrTypeLast][cell] + return (this - last) / float64(as.interval/1000), true // rate per sec + default: + return as.dataArrays[aggr][cell], true + } +} + +// get the time per aggregate cell +func (as *Set) GetCellTime(base int64, index int) int64 { + if as.overlapWin == nil { + return base + int64(index)*as.interval + } + + if index >= len(as.overlapWin) { + return base + } + + return base - int64(as.overlapWin[index])*as.interval +} + +func (as *Set) Clear() { + as.maxCell = 0 + for aggr := range as.dataArrays { + initArray := getOrCreateInitDataArray(aggr, len(as.dataArrays[0])) + copy(as.dataArrays[aggr], initArray) + } +} + +// Check if cell has data.
Assumes that count is always present +func (as *Set) HasData(cell int) bool { + return as.dataArrays[aggrTypeCount][cell] > 0 +} + +func getOrCreateInitDataArray(aggrType AggrType, length int) []float64 { + // Create once or override if required size is greater than existing array + if initDataArrayCache[aggrType] == nil || len(initDataArrayCache[aggrType]) < length { + initDataArrayCache[aggrType] = createInitDataArray(aggrType, length) + } + return initDataArrayCache[aggrType] +} + +func createInitDataArray(aggrType AggrType, length int) []float64 { + // Prepare "clean" array for fastest reset of "uninitialized" data arrays + resultArray := make([]float64, length, length) + + var initWith float64 + switch aggrType { + case aggrTypeMin: + initWith = math.Inf(1) + case aggrTypeMax: + initWith = math.Inf(-1) + default: + // NOP - default is 0 + } + + for i := range resultArray { + resultArray[i] = initWith + } + + return resultArray +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go new file mode 100644 index 00000000..26fd9ea5 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go @@ -0,0 +1,351 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. 
+*/ + +package appender + +import ( + "fmt" + "sync" + "time" + + "github.com/nuclio/logger" + "github.com/pkg/errors" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-tsdb/internal/pkg/performance" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/partmgr" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +// TODO: make configurable +const maxRetriesOnWrite = 3 +const channelSize = 4096 +const queueStallTime = 1 * time.Millisecond + +const minimalUnixTimeMs = 0 // year 1970 +const maxUnixTimeMs = 13569465600000 // year 2400 + +// to add, rollups policy (cnt, sum, min/max, sum^2) + interval, or policy in per name label +type MetricState struct { + sync.RWMutex + state storeState + Lset utils.LabelsIfc + key string + name string + hash uint64 + refID uint64 + + aggrs []*MetricState + + store *chunkStore + err error + retryCount uint8 + newName bool + isVariant bool +} + +// Metric store states +type storeState uint8 + +const ( + storeStateInit storeState = 0 + storeStatePreGet storeState = 1 // Need to get state + storeStateGet storeState = 2 // Getting old state from storage + storeStateReady storeState = 3 // Ready to update + storeStateUpdate storeState = 4 // Update/write in progress +) + +// store is ready to update samples into the DB +func (m *MetricState) isReady() bool { + return m.state == storeStateReady +} + +func (m *MetricState) getState() storeState { + return m.state +} + +func (m *MetricState) setState(state storeState) { + m.state = state +} + +func (m *MetricState) setError(err error) { + m.err = err +} + +func (m *MetricState) error() error { + m.RLock() + defer m.RUnlock() + return m.err +} + +type cacheKey struct { + name string + hash uint64 +} + +// store the state and metadata for all the metrics +type MetricsCache struct { + cfg *config.V3ioConfig + partitionMngr *partmgr.PartitionManager + mtx sync.RWMutex + container v3io.Container + logger logger.Logger + started bool + + responseChan chan *v3io.Response + nameUpdateChan chan *v3io.Response + asyncAppendChan chan *asyncAppend + updatesInFlight int + + metricQueue *ElasticQueue + updatesComplete chan int + newUpdates chan int + + lastMetric uint64 + + // TODO: consider switching to sync.Map (https://golang.org/pkg/sync/#Map) + cacheMetricMap map[cacheKey]*MetricState // TODO: maybe use hash as key & combine w ref + cacheRefMap map[uint64]*MetricState // TODO: maybe turn to list + free list, periodically delete old metrics + + NameLabelMap map[string]bool // temp store all label names + + lastError error + performanceReporter *performance.MetricReporter + + stopChan chan int +} + +func NewMetricsCache(container v3io.Container, logger logger.Logger, cfg *config.V3ioConfig, + partMngr *partmgr.PartitionManager) *MetricsCache { + + newCache := MetricsCache{container: container, logger: logger, cfg: cfg, partitionMngr: partMngr} + newCache.cacheMetricMap = map[cacheKey]*MetricState{} + newCache.cacheRefMap = map[uint64]*MetricState{} + + newCache.responseChan = make(chan *v3io.Response, channelSize) + newCache.nameUpdateChan = make(chan *v3io.Response, channelSize) + newCache.asyncAppendChan = make(chan *asyncAppend, channelSize) + + newCache.metricQueue = NewElasticQueue() + newCache.updatesComplete = make(chan int, 100) + newCache.newUpdates = make(chan int, 1000) + newCache.stopChan = make(chan int, 3) + + newCache.NameLabelMap = map[string]bool{} + newCache.performanceReporter = performance.ReporterInstanceFromConfig(cfg) + + return &newCache +} + +type asyncAppend struct { +
metric *MetricState + t int64 + v interface{} + resp chan int +} + +func (mc *MetricsCache) Start() error { + err := mc.start() + if err != nil { + return errors.Wrap(err, "Failed to start Appender loop") + } + + return nil +} + +// return metric struct by key +func (mc *MetricsCache) getMetric(name string, hash uint64) (*MetricState, bool) { + mc.mtx.RLock() + defer mc.mtx.RUnlock() + + metric, ok := mc.cacheMetricMap[cacheKey{name, hash}] + return metric, ok +} + +// create a new metric and save in the map +func (mc *MetricsCache) addMetric(hash uint64, name string, metric *MetricState) { + mc.mtx.Lock() + defer mc.mtx.Unlock() + + mc.lastMetric++ + metric.refID = mc.lastMetric + mc.cacheRefMap[mc.lastMetric] = metric + mc.cacheMetricMap[cacheKey{name, hash}] = metric + if _, ok := mc.NameLabelMap[name]; !ok { + metric.newName = true + mc.NameLabelMap[name] = true + } +} + +// return metric struct by refID +func (mc *MetricsCache) getMetricByRef(ref uint64) (*MetricState, bool) { + mc.mtx.RLock() + defer mc.mtx.RUnlock() + + metric, ok := mc.cacheRefMap[ref] + return metric, ok +} + +// Push append to async channel +func (mc *MetricsCache) appendTV(metric *MetricState, t int64, v interface{}) { + mc.asyncAppendChan <- &asyncAppend{metric: metric, t: t, v: v} +} + +// First time add time & value to metric (by label set) +func (mc *MetricsCache) Add(lset utils.LabelsIfc, t int64, v interface{}) (uint64, error) { + + err := verifyTimeValid(t) + if err != nil { + return 0, err + } + + var isValueVariantType bool + // If the value is not of Float type assume it's variant type. + switch v.(type) { + case int, int64, float64, float32: + isValueVariantType = false + default: + isValueVariantType = true + } + + name, key, hash := lset.GetKey() + err = utils.IsValidMetricName(name) + if err != nil { + return 0, err + } + metric, ok := mc.getMetric(name, hash) + + var aggrMetrics []*MetricState + if !ok { + for _, preAggr := range mc.partitionMngr.GetConfig().TableSchemaInfo.PreAggregates { + subLset := lset.Filter(preAggr.Labels) + name, key, hash := subLset.GetKey() + aggrMetric, ok := mc.getMetric(name, hash) + if !ok { + aggrMetric = &MetricState{Lset: subLset, key: key, name: name, hash: hash} + aggrMetric.store = newChunkStore(mc.logger, subLset.LabelNames(), true) + mc.addMetric(hash, name, aggrMetric) + aggrMetrics = append(aggrMetrics, aggrMetric) + } + } + metric = &MetricState{Lset: lset, key: key, name: name, hash: hash, + aggrs: aggrMetrics, isVariant: isValueVariantType} + + metric.store = newChunkStore(mc.logger, lset.LabelNames(), false) + mc.addMetric(hash, name, metric) + } else { + aggrMetrics = metric.aggrs + } + + err = metric.error() + metric.setError(nil) + + if isValueVariantType != metric.isVariant { + newValueType := "numeric" + if isValueVariantType { + newValueType = "string" + } + existingValueType := "numeric" + if metric.isVariant { + existingValueType = "string" + } + return 0, errors.Errorf("Cannot append %v type metric to %v type metric.", newValueType, existingValueType) + } + + mc.appendTV(metric, t, v) + for _, aggrMetric := range aggrMetrics { + mc.appendTV(aggrMetric, t, v) + } + + return metric.refID, err +} + +// fast Add to metric (by refID) +func (mc *MetricsCache) AddFast(ref uint64, t int64, v interface{}) error { + + err := verifyTimeValid(t) + if err != nil { + return err + } + + metric, ok := mc.getMetricByRef(ref) + if !ok { + mc.logger.ErrorWith("Ref not found", "ref", ref) + return fmt.Errorf("ref not found") + } + + err = metric.error() + 
metric.setError(nil) + + mc.appendTV(metric, t, v) + + for _, aggrMetric := range metric.aggrs { + mc.appendTV(aggrMetric, t, v) + } + + return err +} + +func verifyTimeValid(t int64) error { + if t > maxUnixTimeMs || t < minimalUnixTimeMs { + return fmt.Errorf("time '%d' doesn't seem to be a valid Unix timestamp in milliseconds. The time must be in the years range 1970-2400", t) + } + return nil +} +func (mc *MetricsCache) Close() { + //for 3 go funcs + mc.stopChan <- 0 + mc.stopChan <- 0 + mc.stopChan <- 0 +} + +func (mc *MetricsCache) WaitForCompletion(timeout time.Duration) (int, error) { + waitChan := make(chan int, 2) + mc.asyncAppendChan <- &asyncAppend{metric: nil, t: 0, v: 0, resp: waitChan} + + var maxWaitTime time.Duration + + if timeout == 0 { + maxWaitTime = 24 * time.Hour // Almost-infinite time + } else if timeout > 0 { + maxWaitTime = timeout + } else { + // If negative, use the default configured timeout value + maxWaitTime = time.Duration(mc.cfg.DefaultTimeoutInSeconds) * time.Second + } + + var resultCount int + var err error + + mc.performanceReporter.WithTimer("WaitForCompletionTimer", func() { + select { + case resultCount = <-waitChan: + err = mc.lastError + mc.lastError = nil + return + case <-time.After(maxWaitTime): + resultCount = 0 + err = errors.Errorf("The operation timed out after %.2f seconds.", maxWaitTime.Seconds()) + return + } + }) + + return resultCount, err +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/equeue.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/equeue.go new file mode 100644 index 00000000..615785b7 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/equeue.go @@ -0,0 +1,148 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction.
+*/ + +package appender + +import ( + "sync" +) + +const ListSize = 256 + +type list [ListSize]*MetricState + +type ElasticQueue struct { + mtx sync.RWMutex + data []*list + head, tail int +} + +// Elastic Queue, a fifo queue with dynamic resize +func NewElasticQueue() *ElasticQueue { + newQueue := ElasticQueue{} + newQueue.data = append(newQueue.data, &list{}) + return &newQueue +} + +// Is the queue empty +func (eq *ElasticQueue) IsEmpty() bool { + eq.mtx.Lock() + defer eq.mtx.Unlock() + + return eq.head == eq.tail +} + +// Number of elements in the queue +func (eq *ElasticQueue) Length() int { + eq.mtx.Lock() + defer eq.mtx.Unlock() + + return eq.length() +} + +func (eq *ElasticQueue) length() int { + if eq.head >= eq.tail { + return eq.head - eq.tail + } + + return eq.head + (len(eq.data) * ListSize) - eq.tail +} + +func (eq *ElasticQueue) Push(val *MetricState) int { + eq.mtx.Lock() + defer eq.mtx.Unlock() + + return eq.push(val) +} + +// Push a value to the queue +func (eq *ElasticQueue) push(val *MetricState) int { + headBlock, headOffset := eq.head/ListSize, eq.head%ListSize + tailBlock := eq.tail / ListSize + //wasEmpty := eq.head == eq.tail + + if headBlock == tailBlock-1 && headOffset == ListSize-1 { + eq.data = append(eq.data, &list{}) + copy(eq.data[tailBlock+1:], eq.data[tailBlock:]) + eq.data[tailBlock] = &list{} + + eq.tail += ListSize + + } + + if headBlock == len(eq.data)-1 && headOffset == ListSize-1 { + if tailBlock == 0 { + eq.data = append(eq.data, &list{}) + } + } + + eq.head = (eq.head + 1) % (len(eq.data) * ListSize) + eq.data[headBlock][headOffset] = val + return eq.length() +} + +func (eq *ElasticQueue) Pop() *MetricState { + eq.mtx.Lock() + defer eq.mtx.Unlock() + + return eq.pop() +} + +func (eq *ElasticQueue) PopN(length int) []*MetricState { + eq.mtx.Lock() + defer eq.mtx.Unlock() + var list []*MetricState + + for i := 0; i < length; i++ { + metric := eq.pop() + if metric != nil { + list = append(list, metric) + } else { + break + } + } + + return list +} + +// return the oldest value in the queue +func (eq *ElasticQueue) pop() *MetricState { + if eq.head == eq.tail { + return nil + } + + tailBlock, tailOffset := eq.tail/ListSize, eq.tail%ListSize + eq.tail = (eq.tail + 1) % (len(eq.data) * ListSize) + + return eq.data[tailBlock][tailOffset] +} + +// Atomic rotate, push a value to the tail and pop one from the head +func (eq *ElasticQueue) Rotate(val *MetricState) (*MetricState, int) { + eq.mtx.Lock() + defer eq.mtx.Unlock() + + if eq.head == eq.tail { + return val, 0 + } + + length := eq.push(val) + return eq.pop(), length +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go new file mode 100644 index 00000000..eb349e0e --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go @@ -0,0 +1,396 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. 
+ +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package appender + +import ( + "fmt" + "net/http" + "reflect" + "time" + + "github.com/pkg/errors" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-go/pkg/errors" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +// Start event loops for handling metric updates (appends and Get/Update DB responses) +// TODO: we can use multiple Go routines and spread the metrics across based on Hash LSB. +func (mc *MetricsCache) start() error { + + mc.nameUpdateRespLoop() + mc.metricsUpdateLoop(0) + mc.metricFeed(0) + + return nil +} + +// Read data from the append queue, push it into per-metric queues, and manage ingestion states +func (mc *MetricsCache) metricFeed(index int) { + + go func() { + inFlight := 0 + gotData := false + potentialCompletion := false + var completeChan chan int + + for { + select { + case _ = <-mc.stopChan: + return + case inFlight = <-mc.updatesComplete: + // Handle completion notifications from the update loop + length := mc.metricQueue.Length() + mc.logger.Debug(`Complete update cycle - "in-flight requests"=%d; "metric queue length"=%d\n`, inFlight, length) + + // If data was sent and the queue is empty, mark as completion + if length == 0 && gotData { + switch len(mc.asyncAppendChan) { + case 0: + potentialCompletion = true + if completeChan != nil { + completeChan <- 0 + } + case 1: + potentialCompletion = true + } + } + case app := <-mc.asyncAppendChan: + newMetrics := 0 + dataQueued := 0 + numPushed := 0 + inLoop: + for i := 0; i <= mc.cfg.BatchSize; i++ { + if app.metric == nil { + // Handle update completion requests (metric == nil) + completeChan = app.resp + if potentialCompletion { + completeChan <- 0 + } + } else { + potentialCompletion = false + // Handle append requests (Add / AddFast) + gotData = true + metric := app.metric + metric.Lock() + + metric.store.Append(app.t, app.v) + numPushed++ + dataQueued += metric.store.samplesQueueLength() + + // If there are no in-flight requests, add the metric to the queue and update state + if metric.isReady() || metric.getState() == storeStateInit { + + if metric.getState() == storeStateInit { + metric.setState(storeStatePreGet) + } + if metric.isReady() { + metric.setState(storeStateUpdate) + } + + length := mc.metricQueue.Push(metric) + if length < 2*mc.cfg.Workers { + newMetrics++ + } + } + metric.Unlock() + } + // Poll if we have more updates (accelerate the outer select) + if i < mc.cfg.BatchSize { + select { + case app = <-mc.asyncAppendChan: + default: + break inLoop + } + } + } + // Notify the update loop that there are new metrics to process + if newMetrics > 0 { + mc.newUpdates <- newMetrics + } + + // If we have too much work, stall the queue for some time + if numPushed > mc.cfg.BatchSize/2 && dataQueued/numPushed > 64 { + switch { + case dataQueued/numPushed <= 96: + time.Sleep(queueStallTime) + case dataQueued/numPushed > 96 && dataQueued/numPushed < 200: + time.Sleep(4 * queueStallTime) + default: + time.Sleep(10 * queueStallTime) + } + } + } + } + }() +} + +// An async loop that accepts new metric updates or responses from previous updates and makes new storage requests +func (mc *MetricsCache) metricsUpdateLoop(index int) { + + go func() { + counter := 0 + for { + select { + case _ = <-mc.stopChan: + return + case _ = <-mc.newUpdates: + // Handle new metric 
notifications (from metricFeed) + for mc.updatesInFlight < mc.cfg.Workers*2 { //&& newMetrics > 0{ + freeSlots := mc.cfg.Workers*2 - mc.updatesInFlight + metrics := mc.metricQueue.PopN(freeSlots) + for _, metric := range metrics { + mc.postMetricUpdates(metric) + } + if len(metrics) < freeSlots { + break + } + } + + if mc.updatesInFlight == 0 { + mc.logger.Debug("Complete new update cycle - in-flight %d.\n", mc.updatesInFlight) + mc.updatesComplete <- 0 + } + case resp := <-mc.responseChan: + // Handle V3IO async responses + nonQueued := mc.metricQueue.IsEmpty() + + inLoop: + for i := 0; i <= mc.cfg.BatchSize; i++ { + + mc.updatesInFlight-- + counter++ + if counter%3000 == 0 { + mc.logger.Debug("Handle response: inFly %d, Q %d", mc.updatesInFlight, mc.metricQueue.Length()) + } + metric := resp.Context.(*MetricState) + mc.handleResponse(metric, resp, nonQueued) + + // Poll if we have more responses (accelerate the outer select) + if i < mc.cfg.BatchSize { + select { + case resp = <-mc.responseChan: + default: + break inLoop + } + } + } + + // Post updates if we have queued metrics and the channel has room for more + for mc.updatesInFlight < mc.cfg.Workers*2 { + freeSlots := mc.cfg.Workers*2 - mc.updatesInFlight + metrics := mc.metricQueue.PopN(freeSlots) + if len(metrics) == 0 { + break + } + for _, metric := range metrics { + mc.postMetricUpdates(metric) + } + } + + // Notify the metric feeder when all in-flight tasks are done + if mc.updatesInFlight == 0 { + mc.logger.Debug("Return to feed. Metric queue length: %d", mc.metricQueue.Length()) + mc.updatesComplete <- 0 + } + } + } + }() +} + +// Send a request with chunk data to the DB +// If in the initial state, read metric metadata from the DB. +func (mc *MetricsCache) postMetricUpdates(metric *MetricState) { + + metric.Lock() + defer metric.Unlock() + var sent bool + var err error + + if metric.getState() == storeStatePreGet { + sent, err = metric.store.getChunksState(mc, metric) + if err != nil { + // Count errors + mc.performanceReporter.IncrementCounter("GetChunksStateError", 1) + + mc.logger.ErrorWith("Failed to get item state", "metric", metric.Lset, "err", err) + setError(mc, metric, err) + } else { + metric.setState(storeStateGet) + } + + } else { + sent, err = metric.store.writeChunks(mc, metric) + if err != nil { + // Count errors + mc.performanceReporter.IncrementCounter("WriteChunksError", 1) + + mc.logger.ErrorWith("Submit failed", "metric", metric.Lset, "err", err) + setError(mc, metric, errors.Wrap(err, "Chunk write submit failed.")) + } else if sent { + metric.setState(storeStateUpdate) + } + if !sent { + if metric.store.samplesQueueLength() == 0 { + metric.setState(storeStateReady) + } else { + if mc.metricQueue.length() > 0 { + mc.newUpdates <- mc.metricQueue.length() + } + } + } + } + + if sent { + mc.updatesInFlight++ + } +} + +// Handle DB responses +// If the backlog queue is empty and have data to send, write more chunks to the DB. 
+func (mc *MetricsCache) handleResponse(metric *MetricState, resp *v3io.Response, canWrite bool) bool { + defer resp.Release() + metric.Lock() + defer metric.Unlock() + + reqInput := resp.Request().Input + + if resp.Error != nil && metric.getState() != storeStateGet { + req := reqInput.(*v3io.UpdateItemInput) + mc.logger.DebugWith("I/O failure", "id", resp.ID, "err", resp.Error, "key", metric.key, + "in-flight", mc.updatesInFlight, "mqueue", mc.metricQueue.Length(), + "numsamples", metric.store.samplesQueueLength(), "path", req.Path, "update expression", req.Expression) + } else { + mc.logger.DebugWith("I/O response", "id", resp.ID, "err", resp.Error, "key", metric.key, "request type", + reflect.TypeOf(reqInput), "request", reqInput) + } + + if metric.getState() == storeStateGet { + // Handle Get response, sync metric state with the DB + metric.store.processGetResp(mc, metric, resp) + + } else { + // Handle Update Expression responses + if resp.Error == nil { + if !metric.store.isAggr() { + // Set fields so next write won't include redundant info (bytes, labels, init_array) + metric.store.ProcessWriteResp() + } + metric.retryCount = 0 + } else { + clear := func() { + resp.Release() + metric.store = newChunkStore(mc.logger, metric.Lset.LabelNames(), metric.store.isAggr()) + metric.retryCount = 0 + metric.setState(storeStateInit) + } + + // Count errors + mc.performanceReporter.IncrementCounter("ChunkUpdateRetries", 1) + + // Metrics with too many update errors go into Error state + metric.retryCount++ + if e, hasStatusCode := resp.Error.(v3ioerrors.ErrorWithStatusCode); hasStatusCode && e.StatusCode() != http.StatusServiceUnavailable { + // If condition was evaluated as false log this and report this error upstream. + if utils.IsFalseConditionError(resp.Error) { + req := reqInput.(*v3io.UpdateItemInput) + // This might happen on attempt to add metric value of wrong type, i.e. 
float <-> string + errMsg := fmt.Sprintf("failed to ingest values of incompatible data type into metric %s.", req.Path) + mc.logger.DebugWith(errMsg) + setError(mc, metric, errors.New(errMsg)) + } else { + mc.logger.ErrorWith(fmt.Sprintf("Chunk update failed with status code %d.", e.StatusCode())) + setError(mc, metric, errors.Wrap(resp.Error, fmt.Sprintf("Chunk update failed due to status code %d.", e.StatusCode()))) + } + clear() + return false + } else if metric.retryCount == maxRetriesOnWrite { + mc.logger.ErrorWith(fmt.Sprintf("Chunk update failed - exceeded %d retries", maxRetriesOnWrite), "metric", metric.Lset) + setError(mc, metric, errors.Wrap(resp.Error, fmt.Sprintf("Chunk update failed after %d retries.", maxRetriesOnWrite))) + clear() + + // Count errors + mc.performanceReporter.IncrementCounter("ChunkUpdateRetryExceededError", 1) + return false + } + } + } + + metric.setState(storeStateReady) + + var sent bool + var err error + + if canWrite { + sent, err = metric.store.writeChunks(mc, metric) + if err != nil { + // Count errors + mc.performanceReporter.IncrementCounter("WriteChunksError", 1) + + mc.logger.ErrorWith("Submit failed", "metric", metric.Lset, "err", err) + setError(mc, metric, errors.Wrap(err, "Chunk write submit failed.")) + } else if sent { + metric.setState(storeStateUpdate) + mc.updatesInFlight++ + } + + } else if metric.store.samplesQueueLength() > 0 { + mc.metricQueue.Push(metric) + metric.setState(storeStateUpdate) + } + + return sent +} + +// Handle responses for names table updates +func (mc *MetricsCache) nameUpdateRespLoop() { + + go func() { + for { + select { + case _ = <-mc.stopChan: + return + case resp := <-mc.nameUpdateChan: + // Handle V3IO PutItem in names table + metric, ok := resp.Context.(*MetricState) + if ok { + metric.Lock() + if resp.Error != nil { + // Count errors + mc.performanceReporter.IncrementCounter("UpdateNameError", 1) + + mc.logger.ErrorWith("Update-name process failed", "id", resp.ID, "name", metric.name) + } else { + mc.logger.DebugWith("Update-name process response", "id", resp.ID, "name", metric.name) + } + metric.Unlock() + } + + resp.Release() + } + } + }() +} + +func setError(mc *MetricsCache, metric *MetricState, err error) { + metric.setError(err) + mc.lastError = err +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go new file mode 100644 index 00000000..d97cab5e --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go @@ -0,0 +1,508 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. 
+*/ + +package appender + +import ( + "encoding/base64" + "fmt" + "path/filepath" + "sort" + "time" + + "github.com/nuclio/logger" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-tsdb/internal/pkg/performance" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/partmgr" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +// TODO: make it configurable +const maxLateArrivalInterval = 59 * 60 * 1000 // Max late arrival of 59min + +// Create a chunk store with two chunks (current, previous) +func newChunkStore(logger logger.Logger, labelNames []string, aggrsOnly bool) *chunkStore { + store := chunkStore{ + logger: logger, + lastTid: -1, + } + if !aggrsOnly { + store.chunks[0] = &attrAppender{} + store.chunks[1] = &attrAppender{} + } + store.labelNames = labelNames + store.performanceReporter, _ = performance.DefaultReporterInstance() + return &store +} + +// chunkStore store state & latest + previous chunk appenders +type chunkStore struct { + logger logger.Logger + performanceReporter *performance.MetricReporter + + curChunk int + nextTid int64 + lastTid int64 + chunks [2]*attrAppender + + labelNames []string + aggrList *aggregate.AggregatesList + pending pendingList + maxTime int64 + delRawSamples bool // TODO: for metrics w aggregates only +} + +func (cs *chunkStore) isAggr() bool { + return cs.chunks[0] == nil +} + +func (cs *chunkStore) samplesQueueLength() int { + return len(cs.pending) +} + +// Chunk appender object, state used for appending t/v to a chunk +type attrAppender struct { + state chunkState + appender chunkenc.Appender + partition *partmgr.DBPartition + chunkMint int64 +} + +type chunkState uint8 + +const ( + chunkStateFirst chunkState = 1 + chunkStateMerge chunkState = 2 + chunkStateCommitted chunkState = 4 + chunkStateWriting chunkState = 8 +) + +// Initialize/clear the chunk appender +func (a *attrAppender) initialize(partition *partmgr.DBPartition, t int64) { + a.state = 0 + a.partition = partition + a.chunkMint = partition.GetChunkMint(t) +} + +// Check whether the specified time (t) is within the chunk range +func (a *attrAppender) inRange(t int64) bool { + return a.partition.InChunkRange(a.chunkMint, t) +} + +// Check whether the specified time (t) is ahead of the chunk range +func (a *attrAppender) isAhead(t int64) bool { + return a.partition.IsAheadOfChunk(a.chunkMint, t) +} + +// Append a single t/v pair to a chunk +func (a *attrAppender) appendAttr(t int64, v interface{}) { + a.appender.Append(t, v) +} + +// struct/list storing uncommitted samples, with time sorting support +type pendingData struct { + t int64 + v interface{} +} + +type pendingList []pendingData + +func (l pendingList) Len() int { return len(l) } +func (l pendingList) Less(i, j int) bool { return l[i].t < l[j].t } +func (l pendingList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } + +// Read (async) the current chunk state and data from the storage, used in the first chunk access +func (cs *chunkStore) getChunksState(mc *MetricsCache, metric *MetricState) (bool, error) { + + if len(cs.pending) == 0 { + return false, nil + } + // Init chunk and create an aggregates-list object based on the partition policy + t := cs.pending[0].t + part, err := mc.partitionMngr.TimeToPart(t) + if err != nil { + return false, err + } + if !cs.isAggr() { + cs.chunks[0].initialize(part, t) + } + cs.nextTid = t + cs.aggrList = aggregate.NewAggregatesList(part.AggrType()) + + // TODO: if policy to merge w 
old chunks needs to get prev chunk, vs restart appender + + // Issue a GetItem command to the DB to load last state of metric + path := part.GetMetricPath(metric.name, metric.hash, cs.labelNames, cs.isAggr()) + getInput := v3io.GetItemInput{ + Path: path, AttributeNames: []string{config.MaxTimeAttrName}} + + request, err := mc.container.GetItem(&getInput, metric, mc.responseChan) + if err != nil { + mc.logger.ErrorWith("Failed to send a GetItem request to the TSDB", "metric", metric.key, "err", err) + return false, err + } + + mc.logger.DebugWith("Get metric state", "name", metric.name, "key", metric.key, "reqid", request.ID) + return true, nil +} + +// Process the GetItem response from the DB and initialize or restore the current chunk +func (cs *chunkStore) processGetResp(mc *MetricsCache, metric *MetricState, resp *v3io.Response) { + + if !cs.isAggr() { + // TODO: init based on schema, use init function, recover old state vs append based on policy + chunk := chunkenc.NewChunk(cs.logger, metric.isVariant) + app, _ := chunk.Appender() + cs.chunks[0].appender = app + cs.chunks[0].state |= chunkStateFirst + } + + latencyNano := time.Now().UnixNano() - resp.Request().SendTimeNanoseconds + cs.performanceReporter.UpdateHistogram("UpdateMetricLatencyHistogram", latencyNano) + + if resp.Error != nil { + if utils.IsNotExistsError(resp.Error) { + if metric.newName { + path := filepath.Join(mc.cfg.TablePath, config.NamesDirectory, metric.name) + putInput := v3io.PutItemInput{Path: path, Attributes: map[string]interface{}{}} + + request, err := mc.container.PutItem(&putInput, metric, mc.nameUpdateChan) + if err != nil { + cs.performanceReporter.IncrementCounter("PutNameError", 1) + mc.logger.ErrorWith("Update-name PutItem failed", "metric", metric.key, "err", err) + } else { + mc.logger.DebugWith("Update name", "name", metric.name, "key", metric.key, "reqid", request.ID) + } + } + } else { + mc.logger.Error("Update metric has failed with error: %v", resp.Error) + cs.performanceReporter.IncrementCounter("UpdateMetricError", 1) + } + + return + } + + // Check and update the metric item's end time (maxt) timestamp, allow continuing from the last point in case of failure + item := resp.Output.(*v3io.GetItemOutput).Item + var maxTime int64 + val := item[config.MaxTimeAttrName] + if val != nil { + maxTime = int64(val.(int)) + } + mc.logger.DebugWith("Got metric item", "name", metric.name, "key", metric.key, "maxt", maxTime) + + if !mc.cfg.OverrideOld { + cs.maxTime = maxTime + } + + if !cs.isAggr() { + if cs.chunks[0].inRange(maxTime) && !mc.cfg.OverrideOld { + cs.chunks[0].state |= chunkStateMerge + } + } + // Set Last TableId - indicate that there is no need to create metric object + cs.lastTid = cs.nextTid +} + +// Append data to the right chunk and table based on the time and state +func (cs *chunkStore) Append(t int64, v interface{}) { + if metricReporter, err := performance.DefaultReporterInstance(); err == nil { + metricReporter.IncrementCounter("AppendCounter", 1) + } + + cs.pending = append(cs.pending, pendingData{t: t, v: v}) + // If the new time is older than previous times, sort the list + if len(cs.pending) > 1 && cs.pending[len(cs.pending)-2].t > t { + sort.Sort(cs.pending) + } +} + +// Return current, previous, or create new chunk based on sample time +func (cs *chunkStore) chunkByTime(t int64, isVariantEncoding bool) (*attrAppender, error) { + + // Sample is in the current chunk + cur := cs.chunks[cs.curChunk] + if cur.inRange(t) { + return cur, nil + } + + // Sample is in the next chunk, 
need to initialize + if cur.isAhead(t) { + // Time is ahead of this chunk time, advance the current chunk + part := cur.partition + cur = cs.chunks[cs.curChunk^1] + + chunk := chunkenc.NewChunk(cs.logger, isVariantEncoding) // TODO: init based on schema, use init function + app, err := chunk.Appender() + if err != nil { + return nil, err + } + nextPart, err := part.NextPart(t) + if err != nil { + return nil, err + } + cur.initialize(nextPart, t) + cs.nextTid = t + cur.appender = app + cs.curChunk = cs.curChunk ^ 1 + + return cur, nil + } + + // If it's the first chunk after init we don't allow old updates + if (cur.state & chunkStateFirst) != 0 { + return nil, nil + } + + prev := cs.chunks[cs.curChunk^1] + // Delayed appends - only allowed to previous chunk or within allowed window + if prev.partition != nil && prev.inRange(t) && t > cs.maxTime-maxLateArrivalInterval { + return prev, nil + } + + return nil, nil +} + +// Write all pending samples to DB chunks and aggregates +func (cs *chunkStore) writeChunks(mc *MetricsCache, metric *MetricState) (hasPendingUpdates bool, err error) { + cs.performanceReporter.WithTimer("WriteChunksTimer", func() { + // Return if there are no pending updates + if len(cs.pending) == 0 { + hasPendingUpdates, err = false, nil + return + } + + expr := "" + notInitialized := false + + // Init the partition info and find whether we need to init the metric headers (labels, ..) in the case of a new partition + t0 := cs.pending[0].t + partition, err := mc.partitionMngr.TimeToPart(t0) + if err != nil { + hasPendingUpdates = false + return + } + if partition.GetStartTime() > cs.lastTid { + notInitialized = true + cs.lastTid = partition.GetStartTime() + } + + // Init the aggregation-buckets info + bucket := partition.Time2Bucket(t0) + numBuckets := partition.AggrBuckets() + isNewBucket := bucket > partition.Time2Bucket(cs.maxTime) + + var activeChunk *attrAppender + var pendingSampleIndex int + var pendingSamplesCount int + + // Loop over pending samples, add to chunks & aggregates (create required update expressions) + for pendingSampleIndex < len(cs.pending) && pendingSamplesCount < mc.cfg.BatchSize && partition.InRange(cs.pending[pendingSampleIndex].t) { + sampleTime := cs.pending[pendingSampleIndex].t + + if sampleTime <= cs.maxTime && !mc.cfg.OverrideOld { + mc.logger.WarnWith("Omitting the sample - time is earlier than the last sample time for this metric", "metric", metric.Lset, "T", sampleTime) + + // If we have reached the end of the pending events and there are events to update, create an update expression and break from loop, + // Otherwise, discard the event and continue normally + if pendingSampleIndex == len(cs.pending)-1 { + if pendingSamplesCount > 0 { + expr = expr + cs.aggrList.SetOrUpdateExpr("v", bucket, isNewBucket) + expr = expr + cs.appendExpression(activeChunk) + } + pendingSampleIndex++ + break + } else { + pendingSampleIndex++ + continue + } + } + + // Init activeChunk if nil (when samples are too old); if still too + // old, skip to next sample + if !cs.isAggr() && activeChunk == nil { + activeChunk, err = cs.chunkByTime(sampleTime, metric.isVariant) + if err != nil { + hasPendingUpdates = false + return + } + if activeChunk == nil { + pendingSampleIndex++ + mc.logger.DebugWith("nil active chunk", "T", sampleTime) + continue + } + } + + // Advance maximum time processed in metric + if sampleTime > cs.maxTime { + cs.maxTime = sampleTime + } + + // Add a value to the aggregates list + cs.aggrList.Aggregate(sampleTime, 
cs.pending[pendingSampleIndex].v) + + if activeChunk != nil { + // Add a value to the compressed raw-values chunk + activeChunk.appendAttr(sampleTime, cs.pending[pendingSampleIndex].v) + } + + // If this is the last item or last item in the same partition, add + // expressions and break + if (pendingSampleIndex == len(cs.pending)-1) || pendingSamplesCount == mc.cfg.BatchSize-1 || !partition.InRange(cs.pending[pendingSampleIndex+1].t) { + expr = expr + cs.aggrList.SetOrUpdateExpr("v", bucket, isNewBucket) + expr = expr + cs.appendExpression(activeChunk) + pendingSampleIndex++ + pendingSamplesCount++ + break + } + + // If the next item is in new Aggregate bucket, generate an + // expression and initialize the new bucket + nextT := cs.pending[pendingSampleIndex+1].t + nextBucket := partition.Time2Bucket(nextT) + if nextBucket != bucket { + expr = expr + cs.aggrList.SetOrUpdateExpr("v", bucket, isNewBucket) + cs.aggrList.Clear() + bucket = nextBucket + isNewBucket = true + } + + // If the next item is in a new chunk, generate an expression and + // initialize the new chunk + if activeChunk != nil && !activeChunk.inRange(nextT) { + expr = expr + cs.appendExpression(activeChunk) + activeChunk, err = cs.chunkByTime(nextT, metric.isVariant) + if err != nil { + hasPendingUpdates = false + return + } + } + + pendingSampleIndex++ + pendingSamplesCount++ + } + + cs.aggrList.Clear() + if pendingSampleIndex == len(cs.pending) { + cs.pending = cs.pending[:0] + } else { + // Leave pending unprocessed or from newer partitions + cs.pending = cs.pending[pendingSampleIndex:] + } + + if pendingSamplesCount == 0 || expr == "" { + if len(cs.pending) > 0 { + mc.metricQueue.Push(metric) + } + hasPendingUpdates = false + return + } + + // If the table object wasn't initialized, insert an init expression + if notInitialized { + // Initialize label (dimension) attributes + lblexpr := metric.Lset.GetExpr() + + // Initialize aggregate arrays + lblexpr = lblexpr + cs.aggrList.InitExpr("v", numBuckets) + + var encodingExpr string + if !cs.isAggr() { + encodingExpr = fmt.Sprintf("%s='%d'; ", config.EncodingAttrName, activeChunk.appender.Encoding()) + } + lsetExpr := fmt.Sprintf("%s='%s'; ", config.LabelSetAttrName, metric.key) + expr = lblexpr + encodingExpr + lsetExpr + expr + } + + conditionExpr := "" + + // Only add the condition when adding to a data chunk, not when writing data to label pre-aggregated + if activeChunk != nil { + // Call the V3IO async UpdateItem method + conditionExpr = fmt.Sprintf("NOT exists(%s) OR (exists(%s) AND %s == '%d')", + config.EncodingAttrName, config.EncodingAttrName, + config.EncodingAttrName, activeChunk.appender.Encoding()) + } + expr += fmt.Sprintf("%v=%d;", config.MaxTimeAttrName, cs.maxTime) // TODO: use max() expr + path := partition.GetMetricPath(metric.name, metric.hash, cs.labelNames, cs.isAggr()) + request, err := mc.container.UpdateItem( + &v3io.UpdateItemInput{Path: path, Expression: &expr, Condition: conditionExpr}, metric, mc.responseChan) + if err != nil { + mc.logger.ErrorWith("UpdateItem failed", "err", err) + hasPendingUpdates = false + } + + // Add the async request ID to the requests map (can be avoided if V3IO + // will add user data in request) + mc.logger.DebugWith("Update-metric expression", "name", metric.name, "key", metric.key, "expr", expr, "reqid", request.ID) + + hasPendingUpdates = true + cs.performanceReporter.UpdateHistogram("WriteChunksSizeHistogram", int64(pendingSamplesCount)) + return + }) + + return +} + +// Process the (async) response for the 
chunk update request +func (cs *chunkStore) ProcessWriteResp() { + + for _, chunk := range cs.chunks { + // Update the chunk state (if it was written to) + if chunk.state&chunkStateWriting != 0 { + chunk.state |= chunkStateCommitted + chunk.state &^= chunkStateWriting + chunk.appender.Chunk().Clear() + } + } +} + +// Return the chunk's update expression +func (cs *chunkStore) appendExpression(chunk *attrAppender) string { + + if chunk != nil { + bytes := chunk.appender.Chunk().Bytes() + chunk.state |= chunkStateWriting + + expr := "" + idx, err := chunk.partition.TimeToChunkID(chunk.chunkMint) + if err != nil { + return "" + } + attr := chunk.partition.ChunkID2Attr("v", idx) + + val := base64.StdEncoding.EncodeToString(bytes) + + // Overwrite, merge, or append based on the chunk state + if chunk.state&chunkStateCommitted != 0 || chunk.state&chunkStateMerge != 0 { + expr = fmt.Sprintf("%s=if_not_exists(%s,blob('')) + blob('%s'); ", attr, attr, val) + } else { + expr = fmt.Sprintf("%s=blob('%s'); ", attr, val) + } + + return expr + + } + + return "" +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/bstream.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/bstream.go new file mode 100644 index 00000000..5452cc5d --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/bstream.go @@ -0,0 +1,250 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. + +The code in this file was largely written by Damian Gryski as part of +https://github.com/dgryski/go-tsz and published under the license below. +and was later on modified by the Prometheus project in +https://github.com/prometheus/prometheus +Which are licensed under the Apache License, Version 2.0 (the "License"); + +Followed by modifications found here to suit Iguazio needs + +Copyright (c) 2015,2016 Damian Gryski +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +package chunkenc + +import ( + "io" +) + +// bstream is a stream of bits. +type bstream struct { + stream []byte // the data stream + count uint8 // how many bits are valid in current byte +} + +func newBReader(b []byte) *bstream { + return &bstream{stream: b, count: 8} +} + +func newBWriter(size int) *bstream { + return &bstream{stream: make([]byte, 0, size), count: 0} +} + +func (b *bstream) clone() *bstream { + d := make([]byte, len(b.stream)) + copy(d, b.stream) + return &bstream{stream: d, count: b.count} +} + +func (b *bstream) bytes() []byte { + if b.count == 8 { + return b.stream[0 : len(b.stream)-1] + } + return b.stream +} + +type bit bool + +const ( + zero bit = false + one bit = true +) + +func (b *bstream) padToByte() { + if b.count != 8 { + b.count = 0 + } +} + +func (b *bstream) clear() { + b.stream = b.stream[:0] + b.count = 0 +} + +func (b *bstream) writeBit(bit bit) { + if b.count == 0 { + b.stream = append(b.stream, 0) + b.count = 8 + } + + i := len(b.stream) - 1 + + if bit { + b.stream[i] |= 1 << (b.count - 1) + } + + b.count-- +} + +func (b *bstream) writeByte(byt byte) { + if b.count == 0 { + b.stream = append(b.stream, 0) + b.count = 8 + } + + i := len(b.stream) - 1 + + // fill up b.b with b.count bits from byt + b.stream[i] |= byt >> (8 - b.count) + + b.stream = append(b.stream, 0) + i++ + b.stream[i] = byt << b.count +} + +func (b *bstream) writeBits(u uint64, nbits int) { + u <<= (64 - uint(nbits)) + for nbits >= 8 { + byt := byte(u >> 56) + b.writeByte(byt) + u <<= 8 + nbits -= 8 + } + + for nbits > 0 { + b.writeBit((u >> 63) == 1) + u <<= 1 + nbits-- + } +} + +func (b *bstream) readBit() (bit, error) { + if len(b.stream) == 0 { + return false, io.EOF + } + + if b.count == 0 { + b.stream = b.stream[1:] + + if len(b.stream) == 0 { + return false, io.EOF + } + b.count = 8 + } + + d := (b.stream[0] << (8 - b.count)) & 0x80 + b.count-- + return d != 0, nil +} + +func (b *bstream) ReadByte() (byte, error) { + return b.readByte() +} + +// read one byte without moving the cursor +func (b *bstream) PeekByte() byte { + if b.count == 0 { + if len(b.stream) < 1 { + return 0 + } + return b.stream[1] + } + + return b.stream[0] +} + +func (b *bstream) readByte() (byte, error) { + if len(b.stream) == 0 { + return 0, io.EOF + } + + if b.count == 0 { + b.stream = b.stream[1:] + + if len(b.stream) == 0 { + return 0, io.EOF + } + return b.stream[0], nil + } + + if b.count == 8 { + b.count = 0 + return b.stream[0], nil + } + + byt := b.stream[0] << (8 - b.count) + b.stream = b.stream[1:] + + if len(b.stream) == 0 { + return 0, io.EOF + } + + // We just advanced the stream and can assume the shift to be 0. 
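+	// OR in the first (8 - count) bits of the next stream byte to complete the result,
+	// since the byte being read straddles two bytes of the underlying stream.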
+ byt |= b.stream[0] >> b.count + + return byt, nil +} + +func (b *bstream) readBits(nbits int) (uint64, error) { + var u uint64 + + for nbits >= 8 { + byt, err := b.readByte() + if err != nil { + return 0, err + } + + u = (u << 8) | uint64(byt) + nbits -= 8 + } + + if nbits == 0 { + return u, nil + } + + if nbits > int(b.count) { + u = (u << uint(b.count)) | uint64((b.stream[0]<<(8-b.count))>>(8-b.count)) + nbits -= int(b.count) + b.stream = b.stream[1:] + + if len(b.stream) == 0 { + return 0, io.EOF + } + b.count = 8 + } + + u = (u << uint(nbits)) | uint64((b.stream[0]<<(8-b.count))>>(8-uint(nbits))) + b.count -= uint8(nbits) + return u, nil +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/chunk.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/chunk.go new file mode 100644 index 00000000..62e0ee3d --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/chunk.go @@ -0,0 +1,111 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. + +The code in this file was largely written by Prometheus Authors as part of +https://github.com/prometheus/prometheus +Copyright 2017 The Prometheus Authors +And is also licensed under the Apache License, Version 2.0; + +And was modified to suit Iguazio needs + +*/ + +package chunkenc + +import ( + "fmt" + + "github.com/nuclio/logger" +) + +// Encoding is the identifier for chunk encoding. +type Encoding uint8 + +func (e Encoding) String() string { + switch e { + case EncNone: + return "none" + case EncXOR: + return "XOR" + case EncVariant: + return "Variant" + } + return "" +} + +// Available chunk encodings +const ( + EncNone Encoding = 0 + EncXOR Encoding = 1 + EncVariant Encoding = 2 +) + +// Chunk holds a sequence of sample pairs that can be iterated over and appended to. +type Chunk interface { + Bytes() []byte + Clear() + Encoding() Encoding + Appender() (Appender, error) + Iterator() Iterator +} + +func NewChunk(logger logger.Logger, variant bool) Chunk { + if variant { + return newVarChunk(logger) + } + return newXORChunk(logger) +} + +// FromData returns a chunk from a byte slice of chunk data. +func FromData(logger logger.Logger, e Encoding, d []byte, samples uint16) (Chunk, error) { + switch e { + case EncXOR: + return &XORChunk{logger: logger, b: &bstream{count: 0, stream: d}, samples: samples}, nil + case EncVariant: + return &VarChunk{logger: logger, b: d, samples: samples}, nil + } + return nil, fmt.Errorf("Unknown chunk encoding: %d", e) +} + +// Appender adds metric-sample pairs to a chunk. +type Appender interface { + Append(int64, interface{}) + Chunk() Chunk + Encoding() Encoding +} + +// Iterator is a simple iterator that can only get the next value. 
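+// Next advances the iterator, At returns the current sample as (time, float64),
+// AtString returns the current value rendered as a string, and Err reports any decoding error.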
+type Iterator interface { + At() (int64, float64) + AtString() (int64, string) + Err() error + Next() bool +} + +// NewNopIterator returns a new chunk iterator that doesn't hold any data. +func NewNopIterator() Iterator { + return nopIterator{} +} + +type nopIterator struct{} + +func (nopIterator) At() (int64, float64) { return 0, 0 } +func (nopIterator) AtString() (int64, string) { return 0, "" } +func (nopIterator) Next() bool { return false } +func (nopIterator) Err() error { return nil } diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/chunkenc_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/chunkenc_test.go new file mode 100644 index 00000000..9b269fa8 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/chunkenc_test.go @@ -0,0 +1,158 @@ +// +build unit + +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package chunkenc + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/nuclio/zap" + "github.com/stretchr/testify/assert" +) + +const basetime = 1524690488000 + +type sample struct { + t int64 + v float64 +} + +// [132 180 199 187 191 88 63 240 - 0 0 0 0 0 0 154 8 - 194 95 255 108 7 126 113 172 - 46 18 195 104 59 202 237 129 - 119 243 146] + +func TestXor(tst *testing.T) { + tst.Skip("Needs to be refactored - Doesn't test anything") + + samples := GenSamples(1000, 5, 1000, 100) + //samples := RealSample(1000) + var byteArray []byte + + logger, err := nucliozap.NewNuclioZapTest("test") + assert.Nil(tst, err) + + ch := newXORChunk(logger) + appender, err := ch.Appender() + if err != nil { + tst.Fatal(err) + } + + for i, s := range samples { + fmt.Println("t,v: ", s.t, s.v) + appender.Append(s.t, s.v) + b := ch.Bytes() + fmt.Println(b, len(b)) + byteArray = append(byteArray, b...) 
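+		// Clear the chunk's accumulated bytes after copying them into byteArray.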
+ ch.Clear() + if i == 4 { + fmt.Println("restarted appender") + ch = newXORChunk(logger) + appender, err = ch.Appender() + if err != nil { + tst.Fatal(err) + } + + } + } + + fmt.Println("Samples:", len(samples), "byteArray:", byteArray, len(byteArray)) + + ch2, err := FromData(logger, EncXOR, byteArray, 0) + if err != nil { + tst.Fatal(err) + } + + iter := ch2.Iterator() + i := 0 + for iter.Next() { + + if iter.Err() != nil { + tst.Fatal(iter.Err()) + } + + t, v := iter.At() + isMatch := t == samples[i].t && v == samples[i].v + fmt.Println("t, v, match: ", t, v, isMatch) + if !isMatch { + tst.Fatalf("iterator t or v doesnt match appended index %d len %d", i, len(samples)) + } + i++ + } + fmt.Println() + + if i != len(samples) { + tst.Fatalf("number of iterator samples (%d) != num of appended (%d)", i, len(samples)) + } + +} + +func TestBstream(t *testing.T) { + t.Skip("Needs to be refactored - Doesn't test anything") + + src := &bstream{count: 8, stream: []byte{0x55, 0x44, 0x33}} + + bs := newBWriter(8) + byt, _ := src.readByte() + bs.writeByte(byt) + fmt.Println(bs.count, bs.stream) + for i := 1; i < 18; i++ { + bit, _ := src.readBit() + fmt.Println(bs.count, bs.stream, bit) + bs.writeBit(bit) + } + + fmt.Println("Reading:") + bs2 := &bstream{count: 8, stream: bs.stream} + fmt.Println(bs2.count, bs2.stream) + for i := 1; i < 18; i++ { + bit, _ := bs2.readBit() + fmt.Println(bs2.count, bs2.stream, bit) + } + +} + +func GenSamples(num, interval int, start, step float64) []sample { + samples := []sample{} + curTime := int64(basetime) + v := start + + for i := 0; i <= num; i++ { + curTime += int64(interval * 1000) + t := curTime + int64(rand.Intn(100)) - 50 + v += float64(rand.Intn(100)-50) / 100 * step + //fmt.Printf("t-%d,v%.2f ", t, v) + samples = append(samples, sample{t: t, v: v}) + } + + return samples +} + +var timeList = []int64{1360281600000, 1360540800000, 1360627200000, 1360713600000, 1360800000000, 1360886400000, 1361232000000, 1361318400000, 1361404800000, 1361491200000, 1361750400000, 1361836800000, 1361923200000, 1362009600000, 1362096000000, 1362355200000, 1362441600000, 1362528000000, 1362614400000, 1362700800000, 1362960000000, 1363046400000, 1363132800000, 1363219200000, 1363305600000, 1363564800000, 1363651200000, 1363737600000, 1363824000000, 1363910400000, 1364169600000, 1364256000000, 1364342400000, 1364428800000, 1364774400000, 1364860800000, 1364947200000, 1365033600000, 1365120000000, 1365379200000, 1365465600000, 1365552000000, 1365638400000, 1365724800000, 1365984000000, 1366070400000, 1366156800000, 1366243200000, 1366329600000, 1366588800000, 1366675200000, 1366761600000, 1366848000000, 1366934400000, 1367193600000, 1367280000000, 1367366400000, 1367452800000, 1367539200000, 1367798400000, 1367884800000, 1367971200000, 1368057600000, 1368144000000, 1368403200000, 1368489600000, 1368576000000, 1368662400000, 1368748800000, 1369008000000, 1369094400000, 1369180800000, 1369267200000, 1369353600000, 1369699200000, 1369785600000, 1369872000000, 1369958400000, 1370217600000, 1370304000000, 1370390400000, 1370476800000, 1370563200000, 1370822400000, 1370908800000, 1370995200000, 1371081600000, 1371168000000, 1371427200000, 1371513600000, 1371600000000, 1371686400000, 1371772800000, 1372032000000, 1372118400000, 1372204800000, 1372291200000, 1372377600000, 1372636800000, 1372723200000, 1372809600000, 1372982400000, 1373241600000, 1373328000000, 1373414400000, 1373500800000, 1373587200000, 1373846400000, 1373932800000, 1374019200000, 1374105600000, 1374192000000, 
1374451200000, 1374537600000, 1374624000000, 1374710400000, 1374796800000, 1375056000000, 1375142400000, 1375228800000, 1375315200000, 1375401600000, 1375660800000, 1375747200000, 1375833600000, 1375920000000, 1376006400000, 1376265600000, 1376352000000, 1376438400000, 1376524800000, 1376611200000, 1376870400000, 1376956800000, 1377043200000, 1377129600000, 1377216000000, 1377475200000, 1377561600000, 1377648000000, 1377734400000, 1377820800000, 1378166400000, 1378252800000, 1378339200000, 1378425600000, 1378684800000, 1378771200000, 1378857600000, 1378944000000, 1379030400000, 1379289600000, 1379376000000, 1379462400000, 1379548800000, 1379635200000, 1379894400000, 1379980800000, 1380067200000, 1380153600000, 1380240000000, 1380499200000, 1380585600000, 1380672000000, 1380758400000, 1380844800000, 1381104000000, 1381190400000, 1381276800000, 1381363200000, 1381449600000, 1381708800000, 1381795200000, 1381881600000, 1381968000000, 1382054400000, 1382313600000, 1382400000000, 1382486400000, 1382572800000, 1382659200000, 1382918400000, 1383004800000, 1383091200000, 1383177600000, 1383264000000, 1383523200000, 1383609600000, 1383696000000, 1383782400000, 1383868800000, 1384128000000, 1384214400000, 1384300800000, 1384387200000, 1384473600000, 1384732800000, 1384819200000, 1384905600000, 1384992000000, 1385078400000, 1385337600000, 1385424000000, 1385510400000, 1385683200000, 1385942400000, 1386028800000, 1386115200000, 1386201600000, 1386288000000, 1386547200000, 1386633600000, 1386720000000, 1386806400000, 1386892800000, 1387152000000, 1387238400000, 1387324800000, 1387411200000, 1387497600000, 1387756800000, 1387843200000, 1388016000000, 1388102400000, 1388361600000, 1388448000000, 1388620800000, 1388707200000, 1388966400000, 1389052800000, 1389139200000, 1389225600000, 1389312000000, 1389571200000, 1389657600000, 1389744000000, 1389830400000, 1389916800000, 1390262400000, 1390348800000, 1390435200000, 1390521600000, 1390780800000, 1390867200000, 1390953600000, 1391040000000, 1391126400000, 1391385600000, 1391472000000, 1391558400000, 1391644800000, 1391731200000, 1391990400000, 1392076800000, 1392163200000, 1392249600000, 1392336000000, 1392681600000, 1392768000000, 1392854400000, 1392940800000, 1393200000000, 1393286400000, 1393372800000, 1393459200000, 1393545600000, 1393804800000, 1393891200000, 1393977600000, 1394064000000, 1394150400000, 1394409600000, 1394496000000, 1394582400000, 1394668800000, 1394755200000, 1395014400000, 1395100800000, 1395187200000, 1395273600000, 1395360000000, 1395619200000, 1395705600000, 1395792000000, 1395878400000, 1395964800000, 1396224000000, 1396310400000, 1396396800000, 1396483200000, 1396569600000, 1396828800000, 1396915200000, 1397001600000, 1397088000000, 1397174400000, 1397433600000, 1397520000000, 1397606400000, 1397692800000, 1398038400000, 1398124800000, 1398211200000, 1398297600000, 1398384000000, 1398643200000, 1398729600000, 1398816000000, 1398902400000, 1398988800000, 1399248000000, 1399334400000, 1399420800000, 1399507200000, 1399593600000, 1399852800000, 1399939200000, 1400025600000, 1400112000000, 1400198400000, 1400457600000, 1400544000000, 1400630400000, 1400716800000, 1400803200000, 1401148800000, 1401235200000, 1401321600000, 1401408000000, 1401667200000, 1401753600000, 1401840000000, 1401926400000, 1402012800000, 1402272000000, 1402358400000, 1402444800000, 1402531200000, 1402617600000, 1402876800000, 1402963200000, 1403049600000, 1403136000000, 1403222400000, 1403481600000, 1403568000000, 1403654400000, 1403740800000, 1403827200000, 
1404086400000, 1404172800000, 1404259200000, 1404345600000, 1404691200000, 1404777600000, 1404864000000, 1404950400000, 1405036800000, 1405296000000, 1405382400000, 1405468800000, 1405555200000, 1405641600000, 1405900800000, 1405987200000, 1406073600000, 1406160000000, 1406246400000, 1406505600000, 1406592000000, 1406678400000, 1406764800000, 1406851200000, 1407110400000, 1407196800000, 1407283200000, 1407369600000, 1407456000000, 1407715200000, 1407801600000, 1407888000000, 1407974400000, 1408060800000, 1408320000000, 1408406400000, 1408492800000, 1408579200000, 1408665600000, 1408924800000, 1409011200000, 1409097600000, 1409184000000, 1409270400000, 1409616000000, 1409702400000, 1409788800000, 1409875200000, 1410134400000, 1410220800000, 1410307200000, 1410393600000, 1410480000000, 1410739200000, 1410825600000, 1410912000000, 1410998400000, 1411084800000, 1411344000000, 1411430400000, 1411516800000, 1411603200000, 1411689600000, 1411948800000, 1412035200000, 1412121600000, 1412208000000, 1412294400000, 1412553600000, 1412640000000, 1412726400000, 1412812800000, 1412899200000, 1413158400000, 1413244800000, 1413331200000, 1413417600000, 1413504000000, 1413763200000, 1413849600000, 1413936000000, 1414022400000, 1414108800000, 1414368000000, 1414454400000, 1414540800000, 1414627200000, 1414713600000, 1414972800000, 1415059200000, 1415145600000, 1415232000000, 1415318400000, 1415577600000, 1415664000000, 1415750400000, 1415836800000, 1415923200000, 1416182400000, 1416268800000, 1416355200000, 1416441600000, 1416528000000, 1416787200000, 1416873600000, 1416960000000, 1417132800000, 1417392000000, 1417478400000, 1417564800000, 1417651200000, 1417737600000, 1417996800000, 1418083200000, 1418169600000, 1418256000000, 1418342400000, 1418601600000, 1418688000000, 1418774400000, 1418860800000, 1418947200000, 1419206400000, 1419292800000, 1419379200000, 1419552000000, 1419811200000, 1419897600000, 1419984000000, 1420156800000, 1420416000000, 1420502400000, 1420588800000, 1420675200000, 1420761600000, 1421020800000, 1421107200000, 1421193600000, 1421280000000, 1421366400000, 1421712000000, 1421798400000, 1421884800000, 1421971200000, 1422230400000, 1422316800000, 1422403200000, 1422489600000, 1422576000000, 1422835200000, 1422921600000, 1423008000000, 1423094400000, 1423180800000, 1423440000000, 1423526400000, 1423612800000, 1423699200000, 1423785600000, 1424131200000, 1424217600000, 1424304000000, 1424390400000, 1424649600000, 1424736000000, 1424822400000, 1424908800000, 1424995200000, 1425254400000, 1425340800000, 1425427200000, 1425513600000, 1425600000000, 1425859200000, 1425945600000, 1426032000000, 1426118400000, 1426204800000, 1426464000000, 1426550400000, 1426636800000, 1426723200000, 1426809600000, 1427068800000, 1427155200000, 1427241600000, 1427328000000, 1427414400000, 1427673600000, 1427760000000, 1427846400000, 1427932800000, 1428278400000, 1428364800000, 1428451200000, 1428537600000, 1428624000000, 1428883200000, 1428969600000, 1429056000000, 1429142400000, 1429228800000, 1429488000000, 1429574400000, 1429660800000, 1429747200000, 1429833600000, 1430092800000, 1430179200000, 1430265600000, 1430352000000, 1430438400000, 1430697600000, 1430784000000, 1430870400000, 1430956800000, 1431043200000, 1431302400000, 1431388800000, 1431475200000, 1431561600000, 1431648000000, 1431907200000, 1431993600000, 1432080000000, 1432166400000, 1432252800000, 1432598400000, 1432684800000, 1432771200000, 1432857600000, 1433116800000, 1433203200000, 1433289600000, 1433376000000, 1433462400000, 1433721600000, 
1433808000000, 1433894400000, 1433980800000, 1434067200000, 1434326400000, 1434412800000, 1434499200000, 1434585600000, 1434672000000, 1434931200000, 1435017600000, 1435104000000, 1435190400000, 1435276800000, 1435536000000, 1435622400000, 1435708800000, 1435795200000, 1436140800000, 1436227200000, 1436313600000, 1436400000000, 1436486400000, 1436745600000, 1436832000000, 1436918400000, 1437004800000, 1437091200000, 1437350400000, 1437436800000, 1437523200000, 1437609600000, 1437696000000, 1437955200000, 1438041600000, 1438128000000, 1438214400000, 1438300800000, 1438560000000, 1438646400000, 1438732800000, 1438819200000, 1438905600000, 1439164800000, 1439251200000, 1439337600000, 1439424000000, 1439510400000, 1439769600000, 1439856000000, 1439942400000, 1440028800000, 1440115200000, 1440374400000, 1440460800000, 1440547200000, 1440633600000, 1440720000000, 1440979200000, 1441065600000, 1441152000000, 1441238400000, 1441324800000, 1441670400000, 1441756800000, 1441843200000, 1441929600000, 1442188800000, 1442275200000, 1442361600000, 1442448000000, 1442534400000, 1442793600000, 1442880000000, 1442966400000, 1443052800000, 1443139200000, 1443398400000, 1443484800000, 1443571200000, 1443657600000, 1443744000000, 1444003200000, 1444089600000, 1444176000000, 1444262400000, 1444348800000, 1444608000000, 1444694400000, 1444780800000, 1444867200000, 1444953600000, 1445212800000, 1445299200000, 1445385600000, 1445472000000, 1445558400000, 1445817600000, 1445904000000, 1445990400000, 1446076800000, 1446163200000, 1446422400000, 1446508800000, 1446595200000, 1446681600000, 1446768000000, 1447027200000, 1447113600000, 1447200000000, 1447286400000, 1447372800000, 1447632000000, 1447718400000, 1447804800000, 1447891200000, 1447977600000, 1448236800000, 1448323200000, 1448409600000, 1448582400000, 1448841600000, 1448928000000, 1449014400000, 1449100800000, 1449187200000, 1449446400000, 1449532800000, 1449619200000, 1449705600000, 1449792000000, 1450051200000, 1450137600000, 1450224000000, 1450310400000, 1450396800000, 1450656000000, 1450742400000, 1450828800000, 1450915200000, 1451260800000, 1451347200000, 1451433600000, 1451520000000, 1451865600000, 1451952000000, 1452038400000, 1452124800000, 1452211200000, 1452470400000, 1452556800000, 1452643200000, 1452729600000, 1452816000000, 1453161600000, 1453248000000, 1453334400000, 1453420800000, 1453680000000, 1453766400000, 1453852800000, 1453939200000, 1454025600000, 1454284800000, 1454371200000, 1454457600000, 1454544000000, 1454630400000, 1454889600000, 1454976000000, 1455062400000, 1455148800000, 1455235200000, 1455580800000, 1455667200000, 1455753600000, 1455840000000, 1456099200000, 1456185600000, 1456272000000, 1456358400000, 1456444800000, 1456704000000, 1456790400000, 1456876800000, 1456963200000, 1457049600000, 1457308800000, 1457395200000, 1457481600000, 1457568000000, 1457654400000, 1457913600000, 1458000000000, 1458086400000, 1458172800000, 1458259200000, 1458518400000, 1458604800000, 1458691200000, 1458777600000, 1459123200000, 1459209600000, 1459296000000, 1459382400000, 1459468800000, 1459728000000, 1459814400000, 1459900800000, 1459987200000, 1460073600000, 1460332800000, 1460419200000, 1460505600000, 1460592000000, 1460678400000, 1460937600000, 1461024000000, 1461110400000, 1461196800000, 1461283200000, 1461542400000, 1461628800000, 1461715200000, 1461801600000, 1461888000000, 1462147200000, 1462233600000, 1462320000000, 1462406400000, 1462492800000, 1462752000000, 1462838400000, 1462924800000, 1463011200000, 1463097600000, 1463356800000, 
1463443200000, 1463529600000, 1463616000000, 1463702400000, 1463961600000, 1464048000000, 1464134400000, 1464220800000, 1464307200000, 1464652800000, 1464739200000, 1464825600000, 1464912000000, 1465171200000, 1465257600000, 1465344000000, 1465430400000, 1465516800000, 1465776000000, 1465862400000, 1465948800000, 1466035200000, 1466121600000, 1466380800000, 1466467200000, 1466553600000, 1466640000000, 1466726400000, 1466985600000, 1467072000000, 1467158400000, 1467244800000, 1467331200000, 1467676800000, 1467763200000, 1467849600000, 1467936000000, 1468195200000, 1468281600000, 1468368000000, 1468454400000, 1468540800000, 1468800000000, 1468886400000, 1468972800000, 1469059200000, 1469145600000, 1469404800000, 1469491200000, 1469577600000, 1469664000000, 1469750400000, 1470009600000, 1470096000000, 1470182400000, 1470268800000, 1470355200000, 1470614400000, 1470700800000, 1470787200000, 1470873600000, 1470960000000, 1471219200000, 1471305600000, 1471392000000, 1471478400000, 1471564800000, 1471824000000, 1471910400000, 1471996800000, 1472083200000, 1472169600000, 1472428800000, 1472515200000, 1472601600000, 1472688000000, 1472774400000, 1473120000000, 1473206400000, 1473292800000, 1473379200000, 1473638400000, 1473724800000, 1473811200000, 1473897600000, 1473984000000, 1474243200000, 1474329600000, 1474416000000, 1474502400000, 1474588800000, 1474848000000, 1474934400000, 1475020800000, 1475107200000, 1475193600000, 1475452800000, 1475539200000, 1475625600000, 1475712000000, 1475798400000, 1476057600000, 1476144000000, 1476230400000, 1476316800000, 1476403200000, 1476662400000, 1476748800000, 1476835200000, 1476921600000, 1477008000000, 1477267200000, 1477353600000, 1477440000000, 1477526400000, 1477612800000, 1477872000000, 1477958400000, 1478044800000, 1478131200000, 1478217600000, 1478476800000, 1478563200000, 1478649600000, 1478736000000, 1478822400000, 1479081600000, 1479168000000, 1479254400000, 1479340800000, 1479427200000, 1479686400000, 1479772800000, 1479859200000, 1480032000000, 1480291200000, 1480377600000, 1480464000000, 1480550400000, 1480636800000, 1480896000000, 1480982400000, 1481068800000, 1481155200000, 1481241600000, 1481500800000, 1481587200000, 1481673600000, 1481760000000, 1481846400000, 1482105600000, 1482192000000, 1482278400000, 1482364800000, 1482451200000, 1482796800000, 1482883200000, 1482969600000, 1483056000000, 1483401600000, 1483488000000, 1483574400000, 1483660800000, 1483920000000, 1484006400000, 1484092800000, 1484179200000, 1484265600000, 1484611200000, 1484697600000, 1484784000000, 1484870400000, 1485129600000, 1485216000000, 1485302400000, 1485388800000, 1485475200000, 1485734400000, 1485820800000, 1485907200000, 1485993600000, 1486080000000, 1486339200000, 1486425600000, 1486512000000, 1486598400000, 1486684800000, 1486944000000, 1487030400000, 1487116800000, 1487203200000, 1487289600000, 1487635200000, 1487721600000, 1487808000000, 1487894400000, 1488153600000, 1488240000000, 1488326400000, 1488412800000, 1488499200000, 1488758400000, 1488844800000, 1488931200000, 1489017600000, 1489104000000, 1489363200000, 1489449600000, 1489536000000, 1489622400000, 1489708800000, 1489968000000, 1490054400000, 1490140800000, 1490227200000, 1490313600000, 1490572800000, 1490659200000, 1490745600000, 1490832000000, 1490918400000, 1491177600000, 1491264000000, 1491350400000, 1491436800000, 1491523200000, 1491782400000, 1491868800000, 1491955200000, 1492041600000, 1492387200000, 1492473600000, 1492560000000, 1492646400000, 1492732800000, 1492992000000, 1493078400000, 
1493164800000, 1493251200000, 1493337600000, 1493596800000, 1493683200000, 1493769600000, 1493856000000, 1493942400000, 1494201600000, 1494288000000, 1494374400000, 1494460800000, 1494547200000, 1494806400000, 1494892800000, 1494979200000, 1495065600000, 1495152000000, 1495411200000, 1495497600000, 1495584000000, 1495670400000, 1495756800000, 1496102400000, 1496188800000, 1496275200000, 1496361600000, 1496620800000, 1496707200000, 1496793600000, 1496880000000, 1496966400000, 1497225600000, 1497312000000, 1497398400000, 1497484800000, 1497571200000, 1497830400000, 1497916800000, 1498003200000, 1498089600000, 1498176000000, 1498435200000, 1498521600000, 1498608000000, 1498694400000, 1498780800000, 1499040000000, 1499212800000, 1499299200000, 1499385600000, 1499644800000, 1499731200000, 1499817600000, 1499904000000, 1499990400000, 1500249600000, 1500336000000, 1500422400000, 1500508800000, 1500595200000, 1500854400000, 1500940800000, 1501027200000, 1501113600000, 1501200000000, 1501459200000, 1501545600000, 1501632000000, 1501718400000, 1501804800000, 1502064000000, 1502150400000, 1502236800000, 1502323200000, 1502409600000, 1502668800000, 1502755200000, 1502841600000, 1502928000000, 1503014400000, 1503273600000, 1503360000000, 1503446400000, 1503532800000, 1503619200000, 1503878400000, 1503964800000, 1504051200000, 1504137600000, 1504224000000, 1504569600000, 1504656000000, 1504742400000, 1504828800000, 1505088000000, 1505174400000, 1505260800000, 1505347200000, 1505433600000, 1505692800000, 1505779200000, 1505865600000, 1505952000000, 1506038400000, 1506297600000, 1506384000000, 1506470400000, 1506556800000, 1506643200000, 1506902400000, 1506988800000, 1507075200000, 1507161600000, 1507248000000, 1507507200000, 1507593600000, 1507680000000, 1507766400000, 1507852800000, 1508112000000, 1508198400000, 1508284800000, 1508371200000, 1508457600000, 1508716800000, 1508803200000, 1508889600000, 1508976000000, 1509062400000, 1509321600000, 1509408000000, 1509494400000, 1509580800000, 1509667200000, 1509926400000, 1510012800000, 1510099200000, 1510185600000, 1510272000000, 1510531200000, 1510617600000, 1510704000000, 1510790400000, 1510876800000, 1511136000000, 1511222400000, 1511308800000, 1511481600000, 1511740800000, 1511827200000, 1511913600000, 1512000000000, 1512086400000, 1512345600000, 1512432000000, 1512518400000, 1512604800000, 1512691200000, 1512950400000, 1513036800000, 1513123200000, 1513209600000, 1513296000000, 1513555200000, 1513641600000, 1513728000000, 1513814400000, 1513900800000, 1514246400000, 1514332800000, 1514419200000, 1514505600000, 1514851200000, 1514937600000, 1515024000000, 1515110400000, 1515369600000, 1515456000000, 1515542400000, 1515628800000, 1515715200000, 1516060800000, 1516147200000, 1516233600000, 1516320000000, 1516579200000, 1516665600000, 1516752000000, 1516838400000, 1516924800000, 1517184000000, 1517270400000, 1517356800000, 1517443200000, 1517529600000, 1517788800000, 1517875200000, 1517961600000} +var valList = []float64{27.260000, 27.405000, 27.370000, 27.370000, 27.610000, 27.400000, 27.290000, 27.815000, 26.810000, 28.230000, 30.130000, 29.455000, 30.370000, 31.250000, 30.900000, 31.550000, 31.865000, 31.310000, 31.250000, 32.485000, 32.295000, 33.000000, 32.560000, 32.925000, 34.020000, 33.115000, 33.940000, 34.165000, 33.750000, 34.000000, 34.135000, 33.495000, 33.630000, 33.850000, 33.485000, 33.720000, 33.265000, 32.475000, 32.120000, 34.215000, 34.340000, 35.375000, 35.365000, 34.680000, 33.525000, 32.277500, 32.225000, 32.310000, 32.895000, 
32.325000, 32.905000, 33.125000, 33.710000, 34.245000, 33.965000, 34.060000, 33.785000, 33.260000, 33.420000, 33.655000, 34.055000, 33.985000, 33.910000, 33.740000, 33.345000, 33.415000, 34.120000, 34.265000, 33.990000, 35.155000, 36.215000, 35.620000, 34.905000, 35.810000, 36.355000, 36.000000, 36.100000, 35.840000, 35.250000, 35.365000, 35.120000, 34.905000, 35.525000, 36.310000, 35.260000, 35.510000, 34.655000, 35.345000, 35.235000, 35.700000, 36.115000, 35.105000, 34.685000, 33.645000, 34.965000, 35.520000, 35.885000, 35.180000, 35.840000, 35.400000, 35.540000, 36.090000, 35.725000, 36.010000, 35.995000, 36.205000, 35.455000, 35.415000, 35.145000, 34.790000, 35.100000, 36.475000, 36.670000, 36.495000, 36.225000, 37.505000, 38.285000, 38.740000, 37.990000, 38.540000, 38.440000, 38.670000, 38.540000, 38.525000, 38.580000, 38.015000, 37.825000, 37.945000, 37.675000, 37.710000, 36.970000, 37.550000, 37.230000, 37.000000, 37.620000, 37.680000, 38.730000, 39.000000, 38.440000, 38.740000, 39.520000, 39.350000, 39.770000, 39.720000, 39.650000, 38.500000, 38.980000, 39.000000, 38.760000, 38.910000, 38.040000, 37.950000, 37.740000, 38.070000, 38.470000, 38.070000, 37.510000, 37.270000, 36.620000, 36.430000, 37.010000, 36.670000, 37.320000, 36.650000, 36.040000, 35.760000, 35.850000, 35.850000, 35.090000, 36.190000, 36.080000, 36.360000, 36.660000, 36.730000, 37.550000, 36.620000, 36.250000, 36.180000, 35.680000, 34.840000, 35.950000, 37.040000, 37.080000, 36.470000, 35.430000, 35.240000, 35.220000, 34.860000, 35.000000, 34.000000, 32.730000, 33.660000, 33.600000, 33.290000, 33.660000, 33.290000, 33.600000, 32.770000, 33.950000, 34.330000, 34.890000, 35.280000, 34.680000, 34.270000, 34.470000, 34.570000, 34.500000, 34.480000, 34.130000, 36.260000, 36.540000, 36.900000, 36.270000, 35.810000, 36.490000, 36.190100, 35.770000, 36.040000, 37.150000, 38.600000, 38.540000, 38.560000, 38.970000, 38.560000, 39.020000, 38.670000, 38.570000, 38.170000, 38.150000, 38.400000, 38.730000, 38.930000, 37.430000, 37.570000, 36.550000, 37.390000, 37.870000, 38.280100, 37.950000, 39.590000, 40.060000, 39.580000, 39.550000, 39.540000, 39.620000, 40.560000, 39.370000, 40.550000, 40.120000, 41.729900, 40.510000, 40.300000, 39.130000, 38.820000, 39.470000, 38.360000, 38.910000, 39.440000, 39.550000, 40.100000, 38.140000, 36.050000, 35.590000, 35.430000, 35.460000, 35.430000, 34.880000, 35.030000, 34.720000, 35.000000, 35.330000, 35.000000, 35.230000, 35.000000, 35.090000, 34.620000, 34.130000, 33.280000, 33.620000, 33.380000, 33.320000, 32.880000, 32.850000, 32.640000, 32.310000, 33.640000, 33.800000, 34.080000, 34.300000, 35.100000, 35.710000, 34.500000, 34.130000, 34.510000, 33.480000, 32.350000, 32.630000, 32.730000, 33.180000, 34.030000, 34.780000, 35.030000, 35.960000, 36.950000, 37.650000, 38.450000, 39.000000, 38.860000, 39.260000, 38.999900, 38.700000, 38.760000, 38.870000, 37.920000, 37.250000, 36.930000, 37.360000, 37.420000, 37.000000, 37.150000, 36.520000, 36.700000, 36.000000, 36.660000, 36.170000, 36.000000, 36.620000, 36.100000, 36.340000, 36.210000, 35.790000, 35.980000, 36.140000, 36.320000, 36.020000, 35.650000, 34.720000, 35.090000, 35.190000, 34.840000, 34.620000, 34.890000, 34.960000, 35.140000, 34.750000, 34.340000, 33.890000, 34.340000, 33.990000, 34.200000, 34.150000, 34.220000, 35.230000, 34.720000, 34.020000, 34.630000, 34.420000, 34.600000, 34.100000, 34.400000, 34.260000, 33.500000, 33.500000, 33.300000, 33.400000, 33.220000, 34.450000, 32.870000, 32.390000, 32.890000, 33.560000, 33.000000, 
32.780000, 32.780000, 34.170000, 33.600000, 33.700000, 33.560000, 34.380000, 34.120000, 33.560000, 33.190000, 33.210000, 33.150000, 33.340000, 32.830000, 33.370000, 33.170000, 33.120000, 33.670000, 33.340000, 32.880000, 33.310000, 33.910000, 33.490000, 33.840000, 33.490000, 33.870000, 33.610000, 33.550000, 33.460000, 33.280000, 32.960000, 33.160000, 33.900000, 33.830000, 33.310000, 32.960000, 32.490000, 30.970000, 31.950000, 31.370000, 31.970000, 32.900000, 32.640000, 31.540000, 31.520000, 31.160000, 31.120000, 30.960000, 31.010000, 29.820000, 29.250000, 28.750000, 27.950000, 29.670000, 31.710000, 30.830000, 31.380000, 31.640000, 30.670000, 32.020000, 30.480000, 30.730000, 31.910000, 31.410000, 30.660000, 31.440000, 30.410000, 31.170000, 31.510000, 32.820000, 33.800000, 32.860000, 33.280000, 32.970000, 32.920000, 33.580000, 33.430000, 33.670000, 33.990000, 35.230000, 34.230000, 34.600000, 33.970000, 32.130000, 32.780000, 32.900000, 31.890000, 31.070000, 31.160000, 30.260000, 29.660000, 29.850000, 29.410000, 29.050000, 30.230000, 29.360000, 30.510000, 32.830000, 32.620000, 30.880000, 30.740000, 30.450000, 30.470000, 30.110000, 29.690000, 29.430000, 29.470000, 29.540000, 28.320000, 28.490000, 28.280000, 28.840000, 29.530000, 29.600000, 29.560000, 30.040000, 28.850000, 28.750000, 29.140000, 29.090000, 28.760000, 28.490000, 28.560000, 27.900000, 27.210000, 26.290000, 26.860000, 27.700000, 27.100000, 26.850000, 26.870000, 26.970000, 27.260000, 27.740000, 28.560000, 28.240000, 27.920000, 27.780000, 26.820000, 27.630000, 27.640000, 27.750000, 28.410000, 29.750000, 29.020000, 29.000000, 29.160000, 29.440000, 28.520000, 28.230000, 27.840000, 27.350000, 26.950000, 28.090000, 27.600000, 27.920000, 28.880000, 27.660000, 28.050000, 28.660000, 28.650000, 28.530000, 29.120000, 29.720000, 28.820000, 28.900000, 28.970000, 29.480000, 29.830000, 30.870000, 30.610000, 30.960000, 30.830000, 31.600000, 31.270000, 30.960000, 31.660000, 32.590000, 32.740000, 32.770000, 32.900000, 32.810000, 32.870000, 33.380000, 32.940000, 32.800000, 33.290000, 33.820000, 33.790000, 34.120000, 34.520000, 34.280000, 34.030000, 34.600000, 35.060000, 35.010000, 35.220000, 35.150000, 35.180000, 35.380000, 35.000000, 34.980000, 35.110000, 34.820000, 34.730000, 34.590000, 34.570000, 33.920000, 33.910000, 34.210000, 34.080000, 34.000000, 33.850000, 33.840000, 34.490000, 34.260000, 34.050000, 33.760000, 33.650000, 33.880000, 34.350000, 34.220000, 33.390000, 33.240000, 33.130000, 32.600000, 32.340000, 31.800000, 31.670000, 31.640000, 31.510000, 31.240000, 30.390000, 30.370000, 30.340000, 30.080000, 29.800000, 29.490000, 30.380000, 29.700000, 29.550000, 29.430000, 28.750000, 28.900000, 28.270000, 27.870000, 25.510000, 25.890000, 26.470000, 26.570000, 26.370000, 25.870000, 25.750000, 25.940000, 25.830000, 25.250000, 25.230000, 25.090000, 25.160000, 26.080000, 26.720000, 27.000000, 26.810000, 26.570000, 26.450000, 25.750000, 24.810000, 23.090000, 22.950000, 21.760000, 22.170000, 23.010000, 23.000000, 22.960000, 23.340000, 23.390000, 23.100000, 23.100000, 23.000000, 22.560000, 22.580000, 22.990000, 22.340000, 22.890000, 23.670000, 23.300000, 23.730000, 23.500000, 24.000000, 23.410000, 24.330000, 23.550000, 22.780000, 21.750000, 22.140000, 21.000000, 22.180000, 22.930000, 23.960000, 23.250000, 23.240000, 23.320000, 22.730000, 22.340000, 22.970000, 23.840000, 23.000000, 23.140000, 23.300000, 22.430000, 21.050000, 20.910000, 20.470000, 21.250000, 21.840000, 21.580000, 21.530000, 22.360000, 22.180000, 22.260000, 22.340000, 22.250000, 22.440000, 
22.590000, 20.490000, 20.290000, 20.770000, 22.370000, 21.120000, 20.260000, 20.410000, 19.640000, 19.510000, 19.020000, 19.110000, 19.190000, 18.780000, 18.320000, 18.070000, 16.840000, 16.540000, 16.240000, 17.590000, 17.220000, 17.240000, 15.910000, 15.860000, 15.160000, 15.430000, 15.290000, 15.700000, 16.160000, 17.010000, 17.680000, 17.400000, 17.810000, 17.200000, 17.090000, 17.670000, 17.660000, 17.020000, 15.710000, 16.780000, 17.080000, 16.320000, 15.710000, 15.850000, 15.470000, 16.530000, 16.050000, 16.950000, 19.360000, 18.940000, 18.810000, 19.270000, 20.400000, 19.680000, 20.240000, 20.170000, 20.820000, 21.070000, 20.790000, 20.310000, 20.420000, 19.490000, 19.270000, 20.210000, 20.090000, 19.410000, 20.530000, 19.350000, 20.440000, 20.330000, 18.920000, 19.710000, 20.130000, 20.350000, 20.390000, 20.860000, 21.890000, 22.520000, 21.440000, 22.020000, 21.860000, 20.680000, 22.410000, 21.570000, 21.870000, 22.600000, 23.250000, 22.890000, 22.640000, 21.800000, 21.630000, 21.020000, 22.000000, 21.670000, 22.880000, 22.740000, 22.260000, 22.610000, 22.420000, 22.350000, 22.300000, 23.710000, 23.490000, 23.130000, 23.780000, 23.140000, 22.600000, 22.390000, 23.700000, 23.400000, 23.460000, 23.000000, 22.880000, 22.410000, 23.080000, 23.790000, 23.710000, 23.330000, 24.030000, 24.290000, 24.370000, 23.790000, 23.650000, 24.140000, 24.370000, 24.800000, 24.190000, 24.110000, 23.910000, 23.780000, 22.810000, 23.380000, 22.890000, 23.430000, 23.570000, 24.230000, 23.690000, 24.020000, 23.820000, 24.360000, 24.790000, 24.650000, 24.610000, 25.700000, 24.850000, 25.560000, 24.920000, 25.020000, 24.630000, 24.260000, 24.660000, 25.040000, 24.980000, 25.520000, 25.390000, 24.530000, 24.530000, 24.560000, 26.280000, 25.330000, 25.810000, 25.260000, 25.280000, 26.160000, 25.850000, 25.500000, 26.000000, 25.960000, 26.140000, 25.450000, 25.080000, 25.370000, 24.690000, 24.500000, 24.410000, 24.200000, 23.410000, 23.770000, 23.600000, 23.560000, 24.580000, 23.770000, 24.050000, 24.370000, 24.460000, 24.740000, 25.050000, 24.330000, 24.280000, 24.720000, 25.200000, 25.330000, 25.100000, 25.120000, 25.950000, 25.710000, 25.830000, 25.940000, 25.620000, 25.510000, 25.310000, 25.320000, 24.700000, 24.440000, 24.690000, 24.390000, 24.800000, 25.500000, 26.220000, 25.920000, 26.020000, 25.110000, 25.150000, 24.710000, 24.520000, 24.500000, 24.790000, 25.550000, 25.720000, 25.540000, 24.820000, 24.590000, 25.420000, 25.580000, 25.890000, 25.600000, 25.540000, 25.750000, 25.550000, 25.570000, 24.030000, 23.550000, 23.710000, 23.040000, 22.330000, 22.160000, 22.220000, 21.090000, 21.890000, 21.250000, 20.940000, 20.370000, 20.920000, 20.700000, 20.460000, 20.990000, 20.270000, 20.300000, 20.140000, 20.450000, 20.330000, 20.310000, 21.690000, 21.320000, 21.310000, 22.160000, 22.250000, 21.880000, 21.780000, 22.360000, 23.140000, 22.370000, 23.040000, 23.180000, 22.300000, 24.440000, 23.060000, 22.720000, 23.200000, 23.870000, 23.740000, 23.830000, 24.640000, 24.170000, 23.480000, 23.310000, 22.520000, 22.650000, 21.850000, 21.940000, 22.190000, 22.360000, 22.400000, 22.890000, 23.240000, 22.800000, 22.810000, 22.980000, 21.930000, 22.400000, 22.740000, 22.800000, 22.410000, 22.280000, 22.300000, 22.210000, 21.850000, 21.530000, 21.730000, 22.720000, 22.320000, 22.350000, 23.000000, 23.080000, 23.080000, 22.320000, 21.180000, 21.640000, 20.660000, 21.280000, 23.750000, 23.730000, 23.340000, 23.870000, 24.170000, 23.900000, 23.860000, 24.140000, 23.920000, 23.010000, 22.930000, 22.840000, 22.950000, 
22.600000, 22.440000, 22.040000, 22.110000, 22.520000, 22.460000, 22.680000, 23.120000, 22.820000, 22.590000, 23.040000, 22.780000, 22.630000, 22.540000, 22.670000, 22.390000, 22.300000, 22.750000, 22.470000, 22.520000, 22.880000, 22.840000, 23.190000, 23.630000, 24.190000, 23.470000, 23.950000, 24.050000, 24.880000, 24.500000, 24.920000, 24.640000, 24.510000, 24.540000, 24.470000, 24.000000, 23.940000, 24.240000, 24.070000, 24.390000, 24.210000, 23.790000, 24.170000, 24.110000, 24.660000, 23.350000, 23.850000, 24.020000, 24.070000, 23.650000, 23.940000, 23.750000, 23.990000, 25.250000, 24.890000, 24.610000, 24.260000, 23.720000, 23.100000, 23.130000, 23.610000, 23.500000, 23.500000, 22.730000, 22.840000, 22.740000, 22.240000, 22.280000, 22.070000, 21.400000, 21.740000, 21.860000, 21.950000, 22.360000, 23.110000, 22.880000, 23.130000, 23.000000, 23.460000, 23.040000, 22.280000, 22.300000, 22.430000, 22.880000, 23.860000, 24.480000, 24.580000, 24.620000, 25.160000, 25.090000, 24.910000, 24.950000, 24.160000, 24.260000, 24.640000, 25.060000, 25.060000, 25.430000, 25.370000, 25.530000, 24.880000, 25.330000, 24.980000, 24.900000, 25.120000, 25.230000, 25.180000, 25.100000, 25.320000, 24.570000, 25.190000, 24.670000, 24.570000, 24.640000, 24.280000, 24.750000, 24.890000, 24.480000, 24.720000, 24.380000, 24.560000, 24.490000, 24.510000, 24.340000, 24.590000, 24.860000, 24.890000, 25.030000, 24.770000, 24.550000, 24.720000, 24.940000, 25.550000, 26.660000, 27.060000, 26.900000, 26.520000, 26.120000, 26.150000, 26.570000, 27.130000, 26.750000, 26.780000, 26.460000, 26.340000, 26.130000, 26.340000, 26.730000, 26.810000, 27.000000, 26.830000, 26.590000, 26.480000, 26.720000, 26.740000, 26.570000, 26.190000, 25.710000, 25.610000, 25.440000, 25.420000, 26.230000, 25.990000, 25.690000, 25.780000, 25.500000, 25.520000, 25.670000, 25.270000, 24.790000, 24.410000, 25.200000, 26.850000, 27.400000, 27.750000, 27.970000, 28.230000, 28.600000, 27.450000, 27.800000, 28.250000, 28.310000, 28.890000, 28.810000, 28.360000, 28.580000, 28.910000, 29.240000, 29.130000, 29.280000, 29.490000, 29.400000, 29.250000, 29.390000, 29.210000, 29.230000, 28.510000, 28.430000, 28.280000, 27.950000, 27.890000, 28.030000, 28.390000, 27.580000, 27.680000, 27.450000, 26.870000, 27.130000, 26.760000, 26.830000, 27.700000, 27.750000, 28.010000, 28.320000, 28.690000, 28.310000, 29.180000, 29.090000, 28.580000, 28.860000, 28.940000, 29.140000, 28.190000, 28.300000, 28.780000, 27.800000, 27.880000, 27.570000, 27.710000, 28.170000, 28.600000, 28.270000, 27.860000, 27.560000, 26.870000, 26.750000, 26.290000, 25.820000, 24.760000, 23.650000, 24.540000} + +func RealSample(num int) []sample { + samples := []sample{} + for i := 0; i < len(timeList) && i < num; i++ { + samples = append(samples, sample{t: timeList[i], v: valList[i]}) + } + return samples +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype.go new file mode 100644 index 00000000..d918dd41 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype.go @@ -0,0 +1,248 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. 
+ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package chunkenc + +import ( + "encoding/binary" + "fmt" + "math" + "strconv" + + "github.com/nuclio/logger" +) + +const ( + varTypeNil byte = 0 + // nolint: deadcode,varcheck + varTypeBlob byte = 1 + varTypeString byte = 2 + // nolint: deadcode,varcheck + varTypeBool byte = 3 + // nolint: deadcode,varcheck + varTypeFloat32 byte = 4 + varTypeFloat64 byte = 5 + // nolint: deadcode,varcheck + varTypeInt8 byte = 8 + // nolint: deadcode,varcheck + varTypeInt16 byte = 9 + // nolint: deadcode,varcheck + varTypeInt32 byte = 10 + // nolint: deadcode,varcheck + varTypeInt64 byte = 11 +) + +const ( + varValueNone byte = 0 + // nolint: deadcode,varcheck + varValueZero byte = 1 + // nolint: deadcode,varcheck + varValueOnes byte = 2 + varValueAny byte = 3 +) + +// Type encoding: 6 bits for var type, 2 bits for predefined type values (e.g. None, zero, NaN, ..) +func decodeType(t byte) (byte, byte) { return t >> 2, t & 3 } +func encodeType(varType, val byte) byte { return varType<<2 + val&3 } + +type VarChunk struct { + logger logger.Logger + + b []byte + samples uint16 + offset int +} + +// NewVarChunk returns a new chunk with variant encoding. +func newVarChunk(logger logger.Logger) Chunk { + return &VarChunk{logger: logger, b: make([]byte, 0, 1024)} +} + +// Encoding returns the encoding type. +func (c *VarChunk) Encoding() Encoding { + return EncVariant +} + +// Bytes returns the underlying byte slice of the chunk. +func (c *VarChunk) Bytes() []byte { + return c.b +} + +func (c *VarChunk) Clear() { + c.b = c.b[:0] +} + +// Appender implements the Chunk interface. +func (c *VarChunk) Appender() (Appender, error) { + a := &varAppender{logger: c.logger, c: c, samples: &c.samples} + return a, nil +} + +// Iterator implements the Chunk interface. 
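+// It returns a varIterator that decodes the appended (type, time, value) records in insertion order.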
+func (c *VarChunk) Iterator() Iterator { + return c.iterator() +} + +type varAppender struct { + logger logger.Logger + + c *VarChunk + samples *uint16 + t int64 +} + +func (a *varAppender) Encoding() Encoding { + return a.c.Encoding() +} + +func (a *varAppender) Chunk() Chunk { + return a.c +} + +func (a *varAppender) Append(t int64, v interface{}) { + if v == nil { + a.appendNoValue(t, varTypeNil, varValueNone) + return + } + + switch val := v.(type) { + case string: + a.appendWithValue(t, varTypeString, []byte(val)) + + default: + a.logger.Error("unsupported type %T of value %v\n", v, v) + } +} + +func (a *varAppender) appendNoValue(t int64, varType, varVal byte) { + head := uint64(t) & 0x00ffffffffffffff + head += uint64(encodeType(varType, varVal)) << 56 + appendUint64(&a.c.b, head) + (*a.samples)++ +} + +func appendUint64(b *[]byte, v uint64) { + for i := 0; i < 8; i++ { + *b = append(*b, byte(v)) + v = v >> 8 + } +} + +func (a *varAppender) appendWithUint(t int64, varType byte, val uint64) { + a.appendNoValue(t, varType, varValueAny) + appendUint64(&a.c.b, val) +} + +func (a *varAppender) appendWithValue(t int64, varType byte, val []byte) { + a.appendNoValue(t, varType, varValueAny) + l := uint16(len(val)) + a.c.b = append(a.c.b, byte(l)) + a.c.b = append(a.c.b, byte(l>>8)) + a.c.b = append(a.c.b, val...) +} + +func (c *VarChunk) iterator() *varIterator { + return &varIterator{ + br: c.b, + numTotal: c.samples, + } +} + +type varIterator struct { + br []byte + numTotal uint16 + numRead uint16 + + t int64 + varType byte + varVal byte + val []byte + err error +} + +func (it *varIterator) Next() bool { + if it.err != nil || len(it.br) < 8 { + return false + } + + head := binary.LittleEndian.Uint64(it.br[0:8]) + it.varType, it.varVal = decodeType(byte(head >> 56)) + it.t = int64(head & 0x00ffffffffffffff) + + it.br = it.br[8:] + + if it.varType == varTypeFloat64 && it.varVal == varValueAny { + + if len(it.br) < 8 { + return it.lenError("float64", 8) + } + it.val = it.br[0:8] + it.br = it.br[8:] + } + + if it.varType == varTypeString && it.varVal == varValueAny { + + if len(it.br) < 2 { + return it.lenError("var len", 2) + } + valLen := int(it.br[1])<<8 + int(it.br[0]) + + if len(it.br) < valLen+2 { + return it.lenError("string", valLen) + } + it.val = it.br[2 : valLen+2] + it.br = it.br[valLen+2:] + } + + return true +} + +func (it *varIterator) lenError(v string, expected int) bool { + it.err = fmt.Errorf("chunk decoding error, less than %d bytes to store %s value", expected, v) + return false +} + +func (it *varIterator) At() (int64, float64) { + + if it.varType == varTypeFloat64 { + switch it.varVal { + case varValueNone: + return it.t, math.NaN() + case varValueAny: + v := binary.LittleEndian.Uint64(it.val) + return it.t, math.Float64frombits(v) + } + } + return it.t, 0 +} + +func (it *varIterator) AtString() (int64, string) { + + if it.varType == varTypeFloat64 { + _, val := it.At() + return it.t, strconv.FormatFloat(val, 'f', -1, 64) + } + + return it.t, string(it.val) +} + +func (it *varIterator) Err() error { + return it.err +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype_test.go new file mode 100644 index 00000000..d8fcdef6 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype_test.go @@ -0,0 +1,54 @@ +// +build unit + +package chunkenc + +import ( + "fmt" + "testing" + "time" + + "github.com/nuclio/zap" + 
"github.com/stretchr/testify/suite" +) + +type testVarEncoderSuite struct { + suite.Suite +} + +func (suite *testVarEncoderSuite) TestStringEnc() { + + logger, err := nucliozap.NewNuclioZapTest("test") + suite.Require().Nil(err) + + chunk := newVarChunk(logger) + appender, err := chunk.Appender() + suite.Require().Nil(err) + + list := []string{"abc", "", "123456"} + t0 := time.Now().UnixNano() / 1000 + + for i, s := range list { + t := t0 + int64(i*1000) + appender.Append(t, s) + } + + iterChunk, err := FromData(logger, EncVariant, chunk.Bytes(), 0) + suite.Require().Nil(err) + + iter := iterChunk.Iterator() + i := 0 + for iter.Next() { + t, v := iter.AtString() + suite.Require().Equal(t, t0+int64(i*1000)) + suite.Require().Equal(v, list[i]) + fmt.Println("t, v: ", t, v) + i++ + } + + suite.Require().Nil(iter.Err()) + suite.Require().Equal(i, len(list)) +} + +func TestVarEncoderSuite(t *testing.T) { + suite.Run(t, new(testVarEncoderSuite)) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/xor.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/xor.go new file mode 100644 index 00000000..bfe9a5e3 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/xor.go @@ -0,0 +1,484 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. + +The code in this file was largely written by Damian Gryski as part of +https://github.com/dgryski/go-tsz and published under the license below. +and was later on modified by the Prometheus project in +https://github.com/prometheus/prometheus +Which are licensed under the Apache License, Version 2.0 (the "License"); + +Followed by modifications found here to suit Iguazio needs + +Copyright (c) 2015,2016 Damian Gryski +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +package chunkenc + +import ( + "math" + "math/bits" + "strconv" + + "github.com/nuclio/logger" +) + +// XORChunk holds XOR encoded sample data. +type XORChunk struct { + logger logger.Logger + + b *bstream + samples uint16 + offset int +} + +// NewXORChunk returns a new chunk with XOR encoding of the given size. +func newXORChunk(logger logger.Logger) Chunk { + //b := make([]byte, 32, 32) + return &XORChunk{logger: logger, b: newBWriter(256)} +} + +// Encoding returns the encoding type. +func (c *XORChunk) Encoding() Encoding { + return EncXOR +} + +// Bytes returns the underlying byte slice of the chunk. +func (c *XORChunk) Bytes() []byte { + //return c.b.getbytes() + return c.b.bytes() +} + +func (c *XORChunk) Clear() { + //c.b.rptr = c.b.getLen() + c.b.clear() +} + +// Appender implements the Chunk interface. +// new implementation, doesnt read the existing buffer, assume its new +func (c *XORChunk) Appender() (Appender, error) { + a := &xorAppender{logger: c.logger, c: c, b: c.b, samples: &c.samples} + if c.samples == 0 { + a.leading = 0xff + } + return a, nil +} + +/* old Appender TODO: do we need to append to existing buffer? maybe in stateless/slow clients +func (c *XORChunk) aAppender() (Appender, error) { + it := c.iterator() + + // To get an appender we must know the state it would have if we had + // appended all existing data from scratch. + // We iterate through the end and populate via the iterator's state. + for it.Next() { + } + if err := it.error(); err != nil { + return nil, err + } + + a := &xorAppender{ + c: c, + b: c.b, + samples: &c.samples, + t: it.t, + v: it.val, + tDelta: it.tDelta, + leading: it.leading, + trailing: it.trailing, + } + if c.samples == 0 { + a.leading = 0xff + } + return a, nil +} +*/ + +func (c *XORChunk) iterator() *xorIterator { + // Should iterators guarantee to act on a copy of the data so it doesn't lock append? + // When using striped locks to guard access to chunks, probably yes. + // Could only copy data if the chunk is not completed yet. + return &xorIterator{ + br: newBReader(c.b.bytes()), // TODO: may need merge + numTotal: c.samples, + } +} + +// Iterator implements the Chunk interface. +func (c *XORChunk) Iterator() Iterator { + return c.iterator() +} + +type xorAppender struct { + logger logger.Logger + + c *XORChunk + b *bstream + samples *uint16 + + t int64 + v float64 + tDelta uint64 + + leading uint8 + trailing uint8 + + isPreviousNewSeries bool +} + +func (a *xorAppender) Encoding() Encoding { + return a.Chunk().Encoding() +} + +func (a *xorAppender) Chunk() Chunk { + return a.c +} + +func (a *xorAppender) Append(t int64, vvar interface{}) { + var tDelta uint64 + num := *a.samples + + var v float64 + switch typedValue := vvar.(type) { + case int: + v = float64(typedValue) + case float64: + v = typedValue + default: + a.logger.Warn("Discarding sample {time: %d, value: %v}, as it's value is of incompatible data type. 
"+ + "Reason: expected 'float' actual '%T'.", t, vvar, vvar) + return + } + + // Do not append if sample is too old. + if t < a.t { + a.logger.Info("Discarding sample from %d, as it is older than the latest sample (%d).", t, a.t) + return + } + + // We write time deltas as 32 bits (for compression) if the delta is too large we'll start a new series + tDelta = uint64(t - a.t) + shouldStartNewSeries := num == 0 || bits.Len64(tDelta) >= 32 + + if shouldStartNewSeries { + // add a signature 11111 to indicate start of cseries in case we put few in the same chunk (append to existing) + a.b.writeBits(0x1f, 5) + a.b.writeBits(uint64(t), 51) + a.b.writeBits(math.Float64bits(v), 64) + a.isPreviousNewSeries = true + tDelta = 0 // saving time delta for the first element is redundant + } else if a.isPreviousNewSeries { + a.b.writeBits(tDelta, 32) + a.writeVDelta(v) + a.isPreviousNewSeries = false + } else { + dod := int64(tDelta - a.tDelta) + + // Gorilla has a max resolution of seconds, Prometheus milliseconds. + // Thus we use higher value range steps with larger bit size. + switch { + case dod == 0: + a.b.writeBit(zero) + case bitRange(dod, 14): + a.b.writeBits(0x02, 2) // '10' + a.b.writeBits(uint64(dod), 14) + case bitRange(dod, 17): + a.b.writeBits(0x06, 3) // '110' + a.b.writeBits(uint64(dod), 17) + case bitRange(dod, 20): + a.b.writeBits(0x0e, 4) // '1110' + a.b.writeBits(uint64(dod), 20) + default: + a.b.writeBits(0x1e, 5) // '11110' + a.b.writeBits(uint64(dod), 32) + } + + a.writeVDelta(v) + + } + + a.t = t + a.v = v + (*a.samples)++ + a.tDelta = tDelta + + a.b.padToByte() +} + +func bitRange(x int64, nbits uint8) bool { + return -((1<<(nbits-1))-1) <= x && x <= 1<<(nbits-1) +} + +func (a *xorAppender) writeVDelta(v float64) { + vDelta := math.Float64bits(v) ^ math.Float64bits(a.v) + + if vDelta == 0 { + a.b.writeBit(zero) + return + } + a.b.writeBit(one) + + leading := uint8(bits.LeadingZeros64(vDelta)) + trailing := uint8(bits.TrailingZeros64(vDelta)) + + // Clamp number of leading zeros to avoid overflow when encoding. + if leading >= 32 { + leading = 31 + } + + if a.leading != 0xff && leading >= a.leading && trailing >= a.trailing { + a.b.writeBit(zero) + a.b.writeBits(vDelta>>a.trailing, 64-int(a.leading)-int(a.trailing)) + } else { + a.leading, a.trailing = leading, trailing + + a.b.writeBit(one) + a.b.writeBits(uint64(leading), 5) + + // Note that if leading == trailing == 0, then sigbits == 64. But that value doesn't actually fit into the 6 bits we have. + // Luckily, we never need to encode 0 significant bits, since that would put us in the other case (vdelta == 0). + // So instead we write out a 0 and adjust it back to 64 on unpacking. 
+ sigbits := 64 - leading - trailing + a.b.writeBits(uint64(sigbits), 6) + a.b.writeBits(vDelta>>trailing, int(sigbits)) + } +} + +type xorIterator struct { + br *bstream + numTotal uint16 + numRead uint16 + + t int64 + val float64 + + leading uint8 + trailing uint8 + + tDelta uint64 + err error +} + +func (it *xorIterator) At() (int64, float64) { + return it.t, it.val +} + +func (it *xorIterator) AtString() (int64, string) { + return it.t, strconv.FormatFloat(it.val, 'f', -1, 64) +} + +func (it *xorIterator) Err() error { + return it.err +} + +func (it *xorIterator) Next() bool { + if it.err != nil || len(it.br.stream) == 0 || (len(it.br.stream) == 1 && it.br.count == 0) { + return false + } + + if it.numRead == 0 { + t, err := it.br.readBits(56) // unlike Gorilla we read a 56bit cropped int (time in year 2000+ has 48bit) + //t, err := binary.ReadVarint(it.br) + if err != nil { + it.err = err + return false + } + t = t & ((0x80 << 40) - 1) + v, err := it.br.readBits(64) + if err != nil { + it.err = err + return false + } + it.t = int64(t) + it.val = math.Float64frombits(v) + + it.numRead++ + return true + } + + // check if this a starting from scratch, signature is 111110xx + isRestart := (it.br.PeekByte() & 0xfc) == 0xf8 + + if it.numRead == 1 && !isRestart { + tDelta, err := it.br.readBits(32) + if err != nil { + it.err = err + return false + } + it.tDelta = tDelta + it.t = it.t + int64(it.tDelta) + + rv := it.readValue() + it.br.padToByte() + + return rv + } + + var d byte + // read delta-of-delta + for i := 0; i < 5; i++ { + d <<= 1 + bit, err := it.br.readBit() + if err != nil { + it.err = err + return false + } + if bit == zero { + break + } + d |= 1 + } + var sz uint8 + var dod int64 + switch d { + case 0x00: + // dod == 0 + case 0x02: + sz = 14 + case 0x06: + sz = 17 + case 0x0e: + sz = 20 + case 0x1e: + bits, err := it.br.readBits(32) + if err != nil { + it.err = err + return false + } + + dod = int64(int32(bits)) + case 0x1f: + // added this case to allow append of a new Gorilla series on an existing chunk (restart from t0) + + t, err := it.br.readBits(51) + //t, err := binary.ReadVarint(it.br) + if err != nil { + it.err = err + return false + } + //t = t & ((0x80 << 40) - 1) + v, err := it.br.readBits(64) + if err != nil { + it.err = err + return false + } + it.t = int64(t) + it.val = math.Float64frombits(v) + + it.numRead = 1 + return true + } + + if sz != 0 { + bits, err := it.br.readBits(int(sz)) + if err != nil { + it.err = err + return false + } + if bits > (1 << (sz - 1)) { + // or something + bits = bits - (1 << sz) + } + dod = int64(bits) + } + + it.tDelta = uint64(int64(it.tDelta) + dod) + it.t = it.t + int64(it.tDelta) + + rv := it.readValue() + it.br.padToByte() + + return rv +} + +func (it *xorIterator) readValue() bool { + bit, err := it.br.readBit() + if err != nil { + it.err = err + return false + } + + if bit == zero { + // it.val = it.val + } else { + bit, err := it.br.readBit() + if err != nil { + it.err = err + return false + } + if bit == zero { + // reuse leading/trailing zero bits + // it.leading, it.trailing = it.leading, it.trailing + } else { + bits, err := it.br.readBits(5) + if err != nil { + it.err = err + return false + } + it.leading = uint8(bits) + + bits, err = it.br.readBits(6) + if err != nil { + it.err = err + return false + } + mbits := uint8(bits) + // 0 significant bits here means we overflowed and we actually need 64; see comment in encoder + if mbits == 0 { + mbits = 64 + } + it.trailing = 64 - it.leading - mbits + } + + mbits := int(64 - 
it.leading - it.trailing) + bits, err := it.br.readBits(mbits) + if err != nil { + it.err = err + return false + } + vbits := math.Float64bits(it.val) + vbits ^= (bits << it.trailing) + it.val = math.Float64frombits(vbits) + } + + it.numRead++ + return true +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go new file mode 100644 index 00000000..494f4e58 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go @@ -0,0 +1,458 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package config + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "strings" + "sync" + + "github.com/ghodss/yaml" + "github.com/imdario/mergo" + "github.com/pkg/errors" +) + +var defaultDisableNginxMitigation = true + +const ( + V3ioConfigEnvironmentVariable = "V3IO_TSDB_CONFIG" + DefaultConfigurationFileName = "v3io-tsdb-config.yaml" + SchemaConfigFileName = ".schema" + + defaultNumberOfIngestWorkers = 1 + defaultNumberOfQueryWorkers = 8 + defaultBatchSize = 64 + defaultTimeoutInSeconds = 24 * 60 * 60 // 24 hours + + defaultMaximumSampleSize = 8 // bytes + defaultMaximumPartitionSize = 1700000 // 1.7MB + defaultMinimumChunkSize = 200 // bytes + defaultMaximumChunkSize = 32000 // bytes + + DefaultShardingBucketsCount = 8 + DefaultStorageClass = "local" + DefaultIngestionRate = "" + DefaultAggregates = "" // no aggregates by default + DefaultAggregationGranularity = "1h" + DefaultLayerRetentionTime = "1y" + DefaultSampleRetentionTime = 0 + DefaultLogLevel = "info" + DefaultVerboseLevel = "debug" + DefaultUseServerAggregateCoefficient = 3 + + // KV attribute names + MaxTimeAttrName = "_maxtime" + LabelSetAttrName = "_lset" + EncodingAttrName = "_enc" + OutOfOrderAttrName = "_ooo" + MetricNameAttrName = "_name" + ObjectNameAttrName = "__name" + ChunkAttrPrefix = "_v" + AggregateAttrPrefix = "_v_" + MtimeSecsAttributeName = "__mtime_secs" + MtimeNSecsAttributeName = "__mtime_nsecs" + + PrometheusMetricNameAttribute = "__name__" + + NamesDirectory = "names" +) + +type BuildInfo struct { + BuildTime string `json:"buildTime,omitempty"` + Os string `json:"os,omitempty"` + Architecture string `json:"architecture,omitempty"` + Version string `json:"version,omitempty"` + CommitHash string `json:"commitHash,omitempty"` + Branch string `json:"branch,omitempty"` +} + +func (bi *BuildInfo) String() string { + return fmt.Sprintf("Build time: %s\nOS: %s\nArchitecture: %s\nVersion: %s\nCommit Hash: %s\nBranch: %s\n", + bi.BuildTime, + bi.Os, + bi.Architecture, + bi.Version, + bi.CommitHash, + bi.Branch) +} + +var ( + // Note, following variables set by make + buildTime, osys, architecture, 
version, commitHash, branch string + + instance *V3ioConfig + once sync.Once + failure error + + BuildMetadta = &BuildInfo{ + BuildTime: buildTime, + Os: osys, + Architecture: architecture, + Version: version, + CommitHash: commitHash, + Branch: branch, + } +) + +func Error() error { + return failure +} + +type V3ioConfig struct { + // V3IO TSDB connection information - web-gateway service endpoint, + // TSDB data container, relative TSDB table path within the container, and + // authentication credentials for the web-gateway service + WebAPIEndpoint string `json:"webApiEndpoint"` + Container string `json:"container"` + TablePath string `json:"tablePath"` + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + AccessKey string `json:"accessKey,omitempty"` + + HTTPTimeout string `json:"httpTimeout,omitempty"` + + // Disabled = true disables the V3IO TSDB configuration in Prometheus and + // enables the internal Prometheus TSDB instead + Disabled bool `json:"disabled,omitempty"` + // Log level - "debug" | "info" | "warn" | "error" + LogLevel string `json:"logLevel,omitempty"` + // Number of parallel V3IO worker routines + Workers int `json:"workers"` + // Number of parallel V3IO worker routines for queries; + // default = the minimum value between 8 and Workers + QryWorkers int `json:"qryWorkers"` + // Override last chunk; by default, an append from the last point is attempted upon restart + OverrideOld bool `json:"overrideOld"` + // Default timeout duration, in seconds; default = 3,600 seconds (1 hour) + DefaultTimeoutInSeconds int `json:"timeout,omitempty"` + // Size of the samples batch to use during ingestion + BatchSize int `json:"batchSize,omitempty"` + // Maximum sample size, in bytes (for the worst compression scenario) + MaximumSampleSize int `json:"maximumSampleSize,omitempty"` + // Maximum size of a partition object + MaximumPartitionSize int `json:"maximumPartitionSize,omitempty"` + // Minimum chunk size, in bytes (for the best compression scenario) + MinimumChunkSize int `json:"minimumChunkSize,omitempty"` + // Maximum chunk size, in bytes (for the worst compression scenario) + MaximumChunkSize int `json:"maximumChunkSize,omitempty"` + // Number of sharding buckets + ShardingBucketsCount int `json:"shardingBucketsCount,omitempty"` + // Metrics-reporter configuration + MetricsReporter MetricsReporterConfig `json:"performance,omitempty"` + // Don't aggregate from raw chunks, for use when working as a Prometheus + // TSDB library + DisableClientAggr bool `json:"disableClientAggr,omitempty"` + // Build Info + BuildInfo *BuildInfo `json:"buildInfo,omitempty"` + // Override nginx bug + DisableNginxMitigation *bool `json:"disableNginxMitigation,omitempty"` + // explicitly always use client aggregation + UsePreciseAggregations bool `json:"usePreciseAggregations,omitempty"` + // Coefficient to decide whether or not to use server aggregates optimization + // use server aggregations if ` / > UseServerAggregateCoefficient` + UseServerAggregateCoefficient int `json:"useServerAggregateCoefficient,omitempty"` + LoadPartitionsFromSchemaAttr bool `json:"loadPartitionsFromSchemaAttr,omitempty"` + RequestChanLength int `json:"RequestChanLength,omitempty"` +} + +type MetricsReporterConfig struct { + // Report on shutdown (Boolean) + ReportOnShutdown bool `json:"reportOnShutdown,omitempty"` + // Output destination - "stdout" or "stderr" + Output string `json:"output"` + // Report periodically (Boolean) + ReportPeriodically bool 
`json:"reportPeriodically,omitempty"` + // Interval between consequence reports (in seconds) + RepotInterval int `json:"reportInterval"` +} + +type Rollup struct { + Aggregates []string `json:"aggregates"` + AggregationGranularity string `json:"aggregationGranularity"` + // Storage class for the aggregates and sample chunks - "cloud" | "local" + StorageClass string `json:"storageClass"` + // [FUTURE] Sample retention period, in hours. 0 means no need to save samples. + SampleRetention int `json:"sampleRetention"` + // Layer retention time, in months ('m'), days ('d'), or hours ('h'). + // Format: "[0-9]+[hmd]". For example: "3h", "7d", "1m" + LayerRetentionTime string `json:"layerRetentionTime"` +} + +type PreAggregate struct { + Labels []string `json:"labels"` + Granularity string `json:"granularity"` + Aggregates []string `json:"aggregates"` +} + +type TableSchema struct { + Version int `json:"version"` + RollupLayers []Rollup `json:"rollupLayers"` + ShardingBucketsCount int `json:"shardingBucketsCount"` + PartitionerInterval string `json:"partitionerInterval"` + ChunckerInterval string `json:"chunckerInterval"` + PreAggregates []PreAggregate `json:"preAggregates"` +} + +type PartitionSchema struct { + Version int `json:"version"` + Aggregates []string `json:"aggregates"` + AggregationGranularity string `json:"aggregationGranularity"` + StorageClass string `json:"storageClass"` + SampleRetention int `json:"sampleRetention"` + PartitionerInterval string `json:"partitionerInterval"` + ChunckerInterval string `json:"chunckerInterval"` +} + +type Partition struct { + StartTime int64 `json:"startTime"` + SchemaInfo PartitionSchema `json:"schemaInfo"` +} + +type SchemaField struct { + Name string `json:"name"` + Type string `json:"type"` + Nullable bool `json:"nullable"` + Items string `json:"items,omitempty"` +} + +type Schema struct { + TableSchemaInfo TableSchema `json:"tableSchemaInfo"` + PartitionSchemaInfo PartitionSchema `json:"partitionSchemaInfo"` + Partitions []*Partition `json:"partitions"` + Fields []SchemaField `json:"fields"` +} + +type MetricConfig struct { + Rollups string `json:"rollups,omitempty"` + RollupMin int `json:"rollupMin,omitempty"` + DelRawSamples bool `json:"delRawSamples,omitempty"` + // Dimensions to pre aggregate (vertical aggregation) + PreAggragate []string `json:"preAggragate,omitempty"` +} + +// TODO: add alerts config (name, match expr, for, lables, annotations) + +func GetOrDefaultConfig() (*V3ioConfig, error) { + return GetOrLoadFromFile("") +} + +func GetOrLoadFromFile(path string) (*V3ioConfig, error) { + once.Do(func() { + instance, failure = loadConfig(path) + return + }) + + return instance, failure +} + +func GetOrLoadFromData(data []byte) (*V3ioConfig, error) { + once.Do(func() { + instance, failure = loadFromData(data) + return + }) + + return instance, failure +} + +// Update the defaults when using a configuration structure +func GetOrLoadFromStruct(cfg *V3ioConfig) (*V3ioConfig, error) { + once.Do(func() { + initDefaults(cfg) + instance = cfg + return + }) + + return instance, nil +} + +// Eagerly reloads TSDB configuration. Note: not thread-safe +func UpdateConfig(path string) { + instance, failure = loadConfig(path) +} + +// Update the defaults when using an existing configuration structure (custom configuration) +func WithDefaults(cfg *V3ioConfig) *V3ioConfig { + initDefaults(cfg) + return cfg +} + +// Create new configuration structure instance based on given instance. 
+// All matching attributes within result structure will be overwritten with values of newCfg +func (config *V3ioConfig) Merge(newCfg *V3ioConfig) (*V3ioConfig, error) { + resultCfg, err := config.merge(newCfg) + if err != nil { + return nil, err + } + + return resultCfg, nil +} + +func (config V3ioConfig) String() string { + if config.Password != "" { + config.Password = "SANITIZED" + } + if config.AccessKey != "" { + config.AccessKey = "SANITIZED" + } + + sanitizedConfigJSON, err := json.Marshal(&config) + if err == nil { + return string(sanitizedConfigJSON) + } + return fmt.Sprintf("Unable to read config: %v", err) +} + +func (*V3ioConfig) merge(cfg *V3ioConfig) (*V3ioConfig, error) { + mergedCfg := V3ioConfig{} + if err := mergo.Merge(&mergedCfg, cfg, mergo.WithOverride); err != nil { + return nil, errors.Wrap(err, "Unable to merge configurations.") + } + return &mergedCfg, nil +} + +func loadConfig(path string) (*V3ioConfig, error) { + + var resolvedPath string + + if strings.TrimSpace(path) != "" { + resolvedPath = path + } else { + envPath := os.Getenv(V3ioConfigEnvironmentVariable) + if envPath != "" { + resolvedPath = envPath + } + } + + if resolvedPath == "" { + resolvedPath = DefaultConfigurationFileName + } + + var data []byte + if _, err := os.Stat(resolvedPath); err != nil { + if os.IsNotExist(err) { + data = []byte{} + } else { + return nil, errors.Wrap(err, "Failed to read the TSDB configuration.") + } + } else { + data, err = ioutil.ReadFile(resolvedPath) + if err != nil { + return nil, err + } + + if len(data) == 0 { + return nil, errors.Errorf("Configuration file '%s' exists but its content is invalid.", resolvedPath) + } + } + + return loadFromData(data) +} + +func loadFromData(data []byte) (*V3ioConfig, error) { + cfg := V3ioConfig{ + BuildInfo: BuildMetadta, + } + err := yaml.Unmarshal(data, &cfg) + + if err != nil { + return nil, err + } + + initDefaults(&cfg) + + return &cfg, err +} + +func initDefaults(cfg *V3ioConfig) { + if cfg.BuildInfo == nil { + cfg.BuildInfo = BuildMetadta + } + + // Initialize the default number of workers + if cfg.Workers == 0 { + cfg.Workers = defaultNumberOfIngestWorkers + } + + // Initialize the default number of Query workers if not set to Min(8,Workers) + if cfg.QryWorkers == 0 { + if cfg.Workers < defaultNumberOfQueryWorkers { + cfg.QryWorkers = cfg.Workers + } else { + cfg.QryWorkers = defaultNumberOfQueryWorkers + } + } + + // Initialize the default batch size + if cfg.BatchSize <= 0 { + cfg.BatchSize = defaultBatchSize + } + + if cfg.DefaultTimeoutInSeconds == 0 { + cfg.DefaultTimeoutInSeconds = int(defaultTimeoutInSeconds) + } + + if cfg.MaximumChunkSize == 0 { + cfg.MaximumChunkSize = defaultMaximumChunkSize + } + + if cfg.MinimumChunkSize == 0 { + cfg.MinimumChunkSize = defaultMinimumChunkSize + } + + if cfg.MaximumSampleSize == 0 { + cfg.MaximumSampleSize = defaultMaximumSampleSize + } + + if cfg.MaximumPartitionSize == 0 { + cfg.MaximumPartitionSize = defaultMaximumPartitionSize + } + + if cfg.ShardingBucketsCount == 0 { + cfg.ShardingBucketsCount = DefaultShardingBucketsCount + } + + if cfg.UseServerAggregateCoefficient == 0 { + cfg.UseServerAggregateCoefficient = DefaultUseServerAggregateCoefficient + } + + if cfg.DisableNginxMitigation == nil { + cfg.DisableNginxMitigation = &defaultDisableNginxMitigation + } + + if cfg.WebAPIEndpoint == "" { + cfg.WebAPIEndpoint = os.Getenv("V3IO_API") + } + + if cfg.AccessKey == "" { + cfg.AccessKey = os.Getenv("V3IO_ACCESS_KEY") + } + + if cfg.Username == "" { + cfg.Username = 
os.Getenv("V3IO_USERNAME") + } + + if cfg.Password == "" { + cfg.Password = os.Getenv("V3IO_PASSWORD") + } +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/config/config_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/config/config_test.go new file mode 100644 index 00000000..e7836faf --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/config/config_test.go @@ -0,0 +1,30 @@ +// +build unit + +package config + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSanitation(tst *testing.T) { + config := &V3ioConfig{ + AccessKey: "12345", + Username: "moses", + Password: "bla-bla-password", + } + + configAsString := config.String() + + // Name should not be sanitized + assert.Contains(tst, configAsString, "moses") + + // sensitive fields must be sanitized + assert.NotContains(tst, configAsString, "12345") + assert.NotContains(tst, configAsString, "bla-bla-password") + + // original object should not be changed + assert.Equal(tst, config.AccessKey, "12345") + assert.Equal(tst, config.Password, "bla-bla-password") +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/formatter/formatters.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/formatter/formatters.go new file mode 100644 index 00000000..2d50a450 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/formatter/formatters.go @@ -0,0 +1,177 @@ +package formatter + +import ( + "encoding/csv" + "fmt" + "io" + "strconv" + "time" + + "github.com/pkg/errors" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type textFormatter struct { + baseFormatter +} + +func (f textFormatter) Write(out io.Writer, set utils.SeriesSet) error { + + for set.Next() { + series := set.At() + name, lbls := labelsToStr(series.Labels()) + fmt.Fprintf(out, "Name: %s Labels: %s\n", name, lbls) + iter := series.Iterator() + for iter.Next() { + if iter.Encoding() == chunkenc.EncXOR { + t, v := iter.At() + fmt.Fprintf(out, " %s v=%.2f\n", f.timeString(t), v) + } else { + t, v := iter.AtString() + fmt.Fprintf(out, " %s v=%v\n", f.timeString(t), v) + } + } + + if iter.Err() != nil { + return iter.Err() + } + + fmt.Fprintln(out, "") + } + + if set.Err() != nil { + return set.Err() + } + + return nil +} + +func (f textFormatter) timeString(t int64) string { + if f.cfg.TimeFormat == "" { + return strconv.Itoa(int(t)) + } + return time.Unix(t/1000, 0).Format(f.cfg.TimeFormat) +} + +type csvFormatter struct { + baseFormatter +} + +func (f csvFormatter) Write(out io.Writer, set utils.SeriesSet) error { + + writer := csv.NewWriter(out) + for set.Next() { + + series := set.At() + name, labelStr := labelsToStr(series.Labels()) + + iter := series.Iterator() + for iter.Next() { + if iter.Encoding() == chunkenc.EncXOR { + t, v := iter.At() + _ = writer.Write([]string{name, labelStr, fmt.Sprintf("%.6f", v), strconv.FormatInt(t, 10)}) + } else { + t, v := iter.AtString() + _ = writer.Write([]string{name, labelStr, fmt.Sprintf("%v", v), strconv.FormatInt(t, 10)}) + } + } + + if iter.Err() != nil { + return iter.Err() + } + } + + if set.Err() != nil { + return set.Err() + } + + writer.Flush() + return nil + +} + +type simpleJSONFormatter struct { + baseFormatter +} + +const metricTemplate = ` + { "target": "%s{%s}", + "datapoints": [%s] + }` + +func (f simpleJSONFormatter) Write(out io.Writer, set utils.SeriesSet) error { + + firstSeries := true + output := "[" + + for set.Next() { + series := set.At() + name, labelStr := 
labelsToStr(series.Labels()) + datapoints := "" + + iter := series.Iterator() + firstItem := true + for iter.Next() { + + if !firstItem { + datapoints = datapoints + "," + } + if iter.Encoding() == chunkenc.EncXOR { + t, v := iter.At() + datapoints = datapoints + fmt.Sprintf("[%.6f,%d]", v, t) + } else { + t, v := iter.AtString() + datapoints = datapoints + fmt.Sprintf("[\"%v\",%d]", v, t) + } + + firstItem = false + } + + if iter.Err() != nil { + return iter.Err() + } + + if !firstSeries { + output = output + "," + } + output = output + fmt.Sprintf(metricTemplate, name, labelStr, datapoints) + firstSeries = false + } + + if set.Err() != nil { + return set.Err() + } + + _, err := out.Write([]byte(output + "\n]")) + + return err +} + +type testFormatter struct { + baseFormatter +} + +func (f testFormatter) Write(out io.Writer, set utils.SeriesSet) error { + var count int + for set.Next() { + count++ + series := set.At() + iter := series.Iterator() + var i int + for iter.Next() { + i++ + } + + if iter.Err() != nil { + return errors.Errorf("error reading point for label set: %v, at index: %v, error: %v", series.Labels(), i, iter.Err()) + } + } + + if set.Err() != nil { + return set.Err() + } + + fmt.Fprintf(out, "got %v unique label sets\n", count) + return nil +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/formatter/type.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/formatter/type.go new file mode 100644 index 00000000..9c1f0923 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/formatter/type.go @@ -0,0 +1,56 @@ +package formatter + +import ( + "fmt" + "io" + "strings" + "time" + + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +const DefaultOutputFormat = "text" + +func NewFormatter(format string, cfg *Config) (Formatter, error) { + if cfg == nil { + cfg = &Config{TimeFormat: time.RFC3339} + } + switch format { + case "", DefaultOutputFormat: + return textFormatter{baseFormatter{cfg: cfg}}, nil + case "csv": + return csvFormatter{baseFormatter{cfg: cfg}}, nil + case "json": + return simpleJSONFormatter{baseFormatter{cfg: cfg}}, nil + case "none": + return testFormatter{baseFormatter{cfg: cfg}}, nil + + default: + return nil, fmt.Errorf("unknown formatter type %s", format) + } +} + +type Formatter interface { + Write(out io.Writer, set utils.SeriesSet) error +} + +type Config struct { + TimeFormat string +} + +type baseFormatter struct { + cfg *Config +} + +func labelsToStr(labels utils.Labels) (string, string) { + name := "" + var lbls []string + for _, lbl := range labels { + if lbl.Name == "__name__" { + name = lbl.Value + } else { + lbls = append(lbls, lbl.Name+"="+lbl.Value) + } + } + return name, strings.Join(lbls, ",") +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go new file mode 100644 index 00000000..aaf716cc --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go @@ -0,0 +1,753 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. 
+ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package partmgr + +import ( + "encoding/json" + "fmt" + "math" + "path" + "sort" + "strconv" + "strings" + "sync" + + "github.com/pkg/errors" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-tsdb/internal/pkg/performance" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/tsdb/schema" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +const ( + partitionAttributePrefix = "p" +) + +// Create a new partition manager +func NewPartitionMngr(schemaConfig *config.Schema, cont v3io.Container, v3ioConfig *config.V3ioConfig) (*PartitionManager, error) { + currentPartitionInterval, err := utils.Str2duration(schemaConfig.PartitionSchemaInfo.PartitionerInterval) + if err != nil { + return nil, err + } + newMngr := &PartitionManager{schemaConfig: schemaConfig, cyclic: false, container: cont, currentPartitionInterval: currentPartitionInterval, v3ioConfig: v3ioConfig} + err = newMngr.updatePartitionsFromSchema(schemaConfig, nil) + if err != nil { + return nil, err + } + return newMngr, nil +} + +type PartitionManager struct { + mtx sync.RWMutex + schemaConfig *config.Schema + schemaMtimeSecs int + schemaMtimeNanosecs int + headPartition *DBPartition + partitions []*DBPartition + cyclic bool + container v3io.Container + currentPartitionInterval int64 //TODO update on schema changes + v3ioConfig *config.V3ioConfig +} + +func (p *PartitionManager) GetSchemaFilePath() string { + return path.Join(p.Path(), config.SchemaConfigFileName) +} + +func (p *PartitionManager) GetPartitionsTablePath() string { + return path.Join(p.Path(), "partitions") +} + +func (p *PartitionManager) Path() string { + return p.v3ioConfig.TablePath +} + +func (p *PartitionManager) GetPartitionsPaths() []string { + var paths []string + for _, part := range p.partitions { + paths = append(paths, part.GetTablePath()) + } + return paths +} + +func (p *PartitionManager) GetConfig() *config.Schema { + return p.schemaConfig +} + +func (p *PartitionManager) Init() error { + return nil +} + +func (p *PartitionManager) TimeToPart(t int64) (*DBPartition, error) { + if p.headPartition == nil { + // Rounding t to the nearest PartitionInterval multiple + _, err := p.createAndUpdatePartition(p.currentPartitionInterval * (t / p.currentPartitionInterval)) + return p.headPartition, err + } + if t >= p.headPartition.startTime { + if (t - p.headPartition.startTime) >= p.currentPartitionInterval { + _, err := p.createAndUpdatePartition(p.currentPartitionInterval * (t / p.currentPartitionInterval)) + if err != nil { + return nil, err + } + } + return p.headPartition, nil + } + // Iterate backwards; ignore the last element as it's the head partition + for i := len(p.partitions) - 2; i >= 0; i-- { + if t >= p.partitions[i].startTime { + if t < p.partitions[i].GetEndTime() { + return p.partitions[i], nil + } + part, err := p.createAndUpdatePartition(p.currentPartitionInterval * (t / p.currentPartitionInterval)) + if err != nil { + return nil, 
err + } + return part, nil + } + } + head := p.headPartition + part, _ := p.createAndUpdatePartition(p.currentPartitionInterval * (t / p.currentPartitionInterval)) + p.headPartition = head + return part, nil +} + +func (p *PartitionManager) createAndUpdatePartition(t int64) (*DBPartition, error) { + time := t & 0x7FFFFFFFFFFFFFF0 + partPath := path.Join(p.Path(), strconv.FormatInt(time/1000, 10)) + "/" + partition, err := NewDBPartition(p, time, partPath) + if err != nil { + return nil, err + } + p.currentPartitionInterval = partition.partitionInterval + + schemaPartition := &config.Partition{StartTime: partition.startTime, SchemaInfo: p.schemaConfig.PartitionSchemaInfo} + if p.headPartition == nil || time > p.headPartition.startTime { + p.headPartition = partition + p.partitions = append(p.partitions, partition) + p.schemaConfig.Partitions = append(p.schemaConfig.Partitions, schemaPartition) + } else { + for i, part := range p.partitions { + if part.startTime > time { + p.partitions = append(p.partitions, nil) + copy(p.partitions[i+1:], p.partitions[i:]) + p.partitions[i] = partition + + p.schemaConfig.Partitions = append(p.schemaConfig.Partitions, nil) + copy(p.schemaConfig.Partitions[i+1:], p.schemaConfig.Partitions[i:]) + p.schemaConfig.Partitions[i] = schemaPartition + break + } + } + } + + err = p.updateSchema() + return partition, err +} + +func (p *PartitionManager) updateSchema() error { + var outerError error + metricReporter := performance.ReporterInstanceFromConfig(p.v3ioConfig) + metricReporter.WithTimer("UpdateSchemaTimer", func() { + // updating schema version and copying partitions to kv table. + p.schemaConfig.TableSchemaInfo.Version = schema.Version + + data, err := json.Marshal(p.schemaConfig) + if err != nil { + outerError = errors.Wrap(err, "Failed to update a new partition in the schema file.") + return + } + schemaFilePath := p.GetSchemaFilePath() + if p.container != nil { // Tests use case only + err = p.container.PutObjectSync(&v3io.PutObjectInput{Path: schemaFilePath, Body: data}) + if err != nil { + outerError = err + return + } + attributes := make(map[string]interface{}, len(p.partitions)) + for _, part := range p.partitions { + marshalledPartition, err := json.Marshal(part.ToMap()) + if err != nil { + outerError = err + return + } + attributes[part.GetPartitionAttributeName()] = marshalledPartition + } + + input := &v3io.PutItemInput{Path: schemaFilePath, Attributes: attributes} + _, err := p.container.PutItemSync(input) + + if err != nil { + outerError = errors.Wrap(err, "failed to update partitions table.") + return + } + } + }) + + return outerError +} + +func (p *PartitionManager) DeletePartitionsFromSchema(partitionsToDelete []*DBPartition) error { + for i := len(p.partitions) - 1; i >= 0; i-- { + for _, partToDelete := range partitionsToDelete { + if p.partitions[i].startTime == partToDelete.startTime { + p.partitions = append(p.partitions[:i], p.partitions[i+1:]...) + break + } + } + + } + for i := len(p.schemaConfig.Partitions) - 1; i >= 0; i-- { + for _, partToDelete := range partitionsToDelete { + if p.schemaConfig.Partitions[i].StartTime == partToDelete.startTime { + p.schemaConfig.Partitions = append(p.schemaConfig.Partitions[:i], p.schemaConfig.Partitions[i+1:]...) 
+ break + } + } + } + + // Delete from partitions KV table + if p.container != nil { // Tests use case only + deletePartitionExpression := strings.Builder{} + for _, partToDelete := range partitionsToDelete { + deletePartitionExpression.WriteString("delete(") + deletePartitionExpression.WriteString(partToDelete.GetPartitionAttributeName()) + deletePartitionExpression.WriteString(");") + } + expression := deletePartitionExpression.String() + _, err := p.container.UpdateItemSync(&v3io.UpdateItemInput{Path: p.GetSchemaFilePath(), Expression: &expression}) + if err != nil { + return err + } + } + + return p.updateSchema() +} + +func (p *PartitionManager) ReadAndUpdateSchema() (err error) { + metricReporter, err := performance.DefaultReporterInstance() + if err != nil { + err = errors.Wrap(err, "Unable to initialize the performance-metrics reporter.") + return + } + + schemaFilePath := p.GetSchemaFilePath() + if err != nil { + err = errors.Wrap(err, "Failed to create timer ReadAndUpdateSchemaTimer.") + return + } + schemaInfoResp, err := p.container.GetItemSync(&v3io.GetItemInput{Path: schemaFilePath, AttributeNames: []string{"**"}}) + if err != nil { + err = errors.Wrapf(err, "Failed to read schema at path '%s'.", schemaFilePath) + return + } + + schemaGetItemResponse := schemaInfoResp.Output.(*v3io.GetItemOutput) + mtimeSecs, err := schemaGetItemResponse.Item.GetFieldInt("__mtime_secs") + if err != nil { + err = errors.Wrapf(err, "Failed to get start time (mtime) in seconds from the schema at '%s'.", schemaFilePath) + return + } + mtimeNsecs, err := schemaGetItemResponse.Item.GetFieldInt("__mtime_nsecs") + if err != nil { + err = errors.Wrapf(err, "Failed to get start time (mtime) in nanoseconds from the schema at '%s'.", schemaFilePath) + return + } + + // Get schema only if the schema has changed + if mtimeSecs > p.schemaMtimeSecs || (mtimeSecs == p.schemaMtimeSecs && mtimeNsecs > p.schemaMtimeNanosecs) { + p.schemaMtimeSecs = mtimeSecs + p.schemaMtimeNanosecs = mtimeNsecs + + metricReporter.WithTimer("ReadAndUpdateSchemaTimer", func() { + err = p.updatePartitionsFromSchema(nil, schemaGetItemResponse) + return + }) + } + return +} + +func (p *PartitionManager) updatePartitionsFromSchema(schemaConfig *config.Schema, schemaGetItemResponse *v3io.GetItemOutput) error { + var currentSchemaVersion int + if schemaConfig == nil { + currentSchemaVersion = p.schemaConfig.TableSchemaInfo.Version + } else { + currentSchemaVersion = schemaConfig.TableSchemaInfo.Version + } + + if currentSchemaVersion == 4 && p.v3ioConfig.LoadPartitionsFromSchemaAttr { + return p.newLoadPartitions(schemaGetItemResponse) + } + + return p.oldLoadPartitions(schemaConfig) +} + +func (p *PartitionManager) oldLoadPartitions(schema *config.Schema) error { + if schema == nil { + schemaFilePath := p.GetSchemaFilePath() + resp, innerError := p.container.GetObjectSync(&v3io.GetObjectInput{Path: schemaFilePath}) + if innerError != nil { + return errors.Wrapf(innerError, "Failed to read schema at path '%s'.", schemaFilePath) + } + + schema = &config.Schema{} + innerError = json.Unmarshal(resp.Body(), schema) + if innerError != nil { + return errors.Wrapf(innerError, "Failed to unmarshal schema at path '%s'.", schemaFilePath) + } + } + + p.partitions = []*DBPartition{} + for _, part := range schema.Partitions { + partPath := path.Join(p.Path(), strconv.FormatInt(part.StartTime/1000, 10)) + "/" + newPart, err := NewDBPartition(p, part.StartTime, partPath) + if err != nil { + return err + } + p.partitions = append(p.partitions, newPart) 
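+ // Keep headPartition pointing at the partition with the latest start time.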
+ if p.headPartition == nil { + p.headPartition = newPart + } else if p.headPartition.startTime < newPart.startTime { + p.headPartition = newPart + } + } + return nil +} + +func (p *PartitionManager) newLoadPartitions(schemaAttributesResponse *v3io.GetItemOutput) error { + if p.container == nil { // Tests use case only + return nil + } + + if schemaAttributesResponse == nil { + schemaFilePath := p.GetSchemaFilePath() + schemaInfoResp, err := p.container.GetItemSync(&v3io.GetItemInput{Path: schemaFilePath, AttributeNames: []string{"*"}}) + if err != nil { + return errors.Wrapf(err, "Failed to read schema at path '%s'.", schemaFilePath) + } + + schemaAttributesResponse = schemaInfoResp.Output.(*v3io.GetItemOutput) + } + + p.partitions = []*DBPartition{} + for partitionStartTime, partitionAttrBlob := range schemaAttributesResponse.Item { + // Only process "partition" attributes + if !strings.HasPrefix(partitionStartTime, partitionAttributePrefix) { + continue + } + intStartTime, err := strconv.ParseInt(partitionStartTime[1:], 10, 64) + if err != nil { + return errors.Wrapf(err, "invalid partition name '%v'", partitionStartTime) + } + + partPath := path.Join(p.Path(), strconv.FormatInt(intStartTime/1000, 10)) + "/" + + partitionAttr := make(map[string]interface{}, 5) + err = json.Unmarshal(partitionAttrBlob.([]byte), &partitionAttr) + if err != nil { + return err + } + newPart, err := NewDBPartitionFromMap(p, intStartTime, partPath, partitionAttr) + if err != nil { + return err + } + p.partitions = append(p.partitions, newPart) + if p.headPartition == nil { + p.headPartition = newPart + } else if p.headPartition.startTime < newPart.startTime { + p.headPartition = newPart + } + } + + sort.SliceStable(p.partitions, func(i, j int) bool { + return p.partitions[i].startTime < p.partitions[j].startTime + }) + + return nil +} + +//if inclusive is true than partial partitions (not fully in range) will be retireved as well +func (p *PartitionManager) PartsForRange(mint, maxt int64, inclusive bool) []*DBPartition { + var parts []*DBPartition + for _, part := range p.partitions { + if (mint < part.GetStartTime() && maxt > part.GetEndTime()) || (inclusive && (part.InRange(mint) || part.InRange(maxt))) { + parts = append(parts, part) + } + } + return parts +} + +type DBPartition struct { + manager *PartitionManager + path string // Full path to the partition within the DB + startTime int64 // Start time + partitionInterval int64 // Number of msecs stored in the partition + chunkInterval int64 // Number of msecs stored in each chunk + prefix string // Path prefix + retentionDays int // Keep samples for N hours + defaultRollups aggregate.AggrType // Default aggregation functions to apply on sample update + rollupTime int64 // Time range per aggregation bucket + rollupBuckets int // Total number of aggregation buckets per partition +} + +// Create and initialize a new partition +func NewDBPartition(pmgr *PartitionManager, startTime int64, path string) (*DBPartition, error) { + rollupTime, err := utils.Str2duration(pmgr.schemaConfig.PartitionSchemaInfo.AggregationGranularity) + if err != nil { + return nil, err + } + partitionInterval, err := utils.Str2duration(pmgr.schemaConfig.PartitionSchemaInfo.PartitionerInterval) + if err != nil { + return nil, err + } + chunkInterval, err := utils.Str2duration(pmgr.schemaConfig.PartitionSchemaInfo.ChunckerInterval) + if err != nil { + return nil, err + } + newPart := DBPartition{ + manager: pmgr, + path: path, + startTime: startTime, + partitionInterval: 
partitionInterval, + chunkInterval: chunkInterval, + prefix: "", + retentionDays: pmgr.schemaConfig.PartitionSchemaInfo.SampleRetention, + rollupTime: rollupTime, + } + + aggrType, _, err := aggregate.AggregatesFromStringListWithCount(pmgr.schemaConfig.PartitionSchemaInfo.Aggregates) + if err != nil { + return nil, err + } + newPart.defaultRollups = aggrType + if rollupTime != 0 { + newPart.rollupBuckets = int(math.Ceil(float64(partitionInterval) / float64(rollupTime))) + } + + return &newPart, nil +} + +// Create and initialize a new partition +func NewDBPartitionFromMap(pmgr *PartitionManager, startTime int64, path string, item v3io.Item) (*DBPartition, error) { + rollupTime, err := item.GetFieldInt("rollupTime") + if err != nil { + return nil, fmt.Errorf("failed to parse rollupTime for partition: %v, rollup: %v", startTime, item.GetField("rollupTime")) + } + + partitionInterval, err := item.GetFieldInt("partitionInterval") + if err != nil { + return nil, fmt.Errorf("failed to parse partitionInterval for partition: %v, interval: %v", startTime, item.GetField("partitionInterval")) + } + + chunkInterval, err := item.GetFieldInt("chunkInterval") + if err != nil { + return nil, fmt.Errorf("failed to parse chunk Interval for partition: %v, interval: %v", startTime, item.GetField("chunkInterval")) + } + + retention, err := item.GetFieldInt("retentionDays") + if err != nil { + return nil, errors.Wrapf(err, "failed to parse retention days for partition: %v, retention: %v", startTime, item.GetField("retentionDays")) + } + + stringAggregates, err := item.GetFieldString("aggregates") + if err != nil { + return nil, errors.Wrapf(err, "failed to parse aggregates for partition: %v, aggregates: %v", startTime, item.GetField("aggregates")) + } + mask, _, err := aggregate.AggregatesFromStringListWithCount(strings.Split(stringAggregates, ",")) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse aggregates from string for partition: %v, aggregates: %v", startTime, stringAggregates) + } + + newPart := DBPartition{ + manager: pmgr, + path: path, + startTime: startTime, + partitionInterval: int64(partitionInterval), + chunkInterval: int64(chunkInterval), + prefix: "", + retentionDays: retention, + rollupTime: int64(rollupTime), + defaultRollups: mask, + } + + if rollupTime != 0 { + newPart.rollupBuckets = int(math.Ceil(float64(partitionInterval) / float64(rollupTime))) + } + + return &newPart, nil +} + +func (p *DBPartition) PreAggregates() []config.PreAggregate { + return p.manager.GetConfig().TableSchemaInfo.PreAggregates +} + +func (p *DBPartition) IsCyclic() bool { + return p.manager.cyclic +} + +// Return the time range covered by a single chunk (the chunk interval) +func (p *DBPartition) TimePerChunk() int64 { + return p.chunkInterval +} + +func (p *DBPartition) NextPart(t int64) (*DBPartition, error) { + return p.manager.TimeToPart(t) +} + +func (p *DBPartition) GetStartTime() int64 { + return p.startTime +} + +func (p *DBPartition) GetEndTime() int64 { + return p.startTime + p.partitionInterval - 1 +} + +// Return the path to this partition's TSDB table +func (p *DBPartition) GetTablePath() string { + return p.path +} + +// Return the name of this partition's attribute name +func (p *DBPartition) GetPartitionAttributeName() string { + return fmt.Sprintf("%v%v", partitionAttributePrefix, strconv.FormatInt(p.startTime, 10)) +} + +// Return a list of sharding keys matching the given item name +func (p *DBPartition) GetShardingKeys(name string) []string { + shardingKeysNum := 
p.manager.schemaConfig.TableSchemaInfo.ShardingBucketsCount + var res = make([]string, 0, shardingKeysNum) + for i := 0; i < shardingKeysNum; i++ { + // Trailing period ('.') for range-scan queries + res = append(res, fmt.Sprintf("%s_%x.", name, i)) + } + + return res +} + +// Return the full path to the specified metric item +func (p *DBPartition) GetMetricPath(name string, hash uint64, labelNames []string, isAggr bool) string { + agg := "" + if isAggr { + if len(labelNames) == 0 { + agg = "agg/" + } else { + var namelessLabelNames []string + for _, l := range labelNames { + if l != config.PrometheusMetricNameAttribute { + namelessLabelNames = append(namelessLabelNames, l) + } + } + agg = fmt.Sprintf("agg/%s/", strings.Join(namelessLabelNames, ",")) + } + } + return fmt.Sprintf("%s%s%s_%x.%016x", p.path, agg, name, int(hash%uint64(p.GetHashingBuckets())), hash) +} + +func (p *DBPartition) AggrType() aggregate.AggrType { + return p.defaultRollups +} + +func (p *DBPartition) AggrBuckets() int { + return p.rollupBuckets +} + +func (p *DBPartition) RollupTime() int64 { + return p.rollupTime +} + +// Return the aggregation bucket ID for the specified time +func (p *DBPartition) Time2Bucket(t int64) int { + if p.rollupTime == 0 { + return 0 + } + if t > p.GetEndTime() { + return p.rollupBuckets - 1 + } + if t < p.GetStartTime() { + return 0 + } + return int((t - p.startTime) / p.rollupTime) +} + +// Return the start time of an aggregation bucket by id +func (p *DBPartition) GetAggregationBucketStartTime(id int) int64 { + return p.startTime + int64(id)*p.rollupTime +} + +// Return the end time of an aggregation bucket by id +func (p *DBPartition) GetAggregationBucketEndTime(id int) int64 { + return p.startTime + int64(id+1)*p.rollupTime - 1 +} + +func (p *DBPartition) Times2BucketRange(start, end int64) []int { + var buckets []int + + if start > p.GetEndTime() || end < p.startTime { + return buckets + } + + startingAggrBucket := p.Time2Bucket(start) + endAggrBucket := p.Time2Bucket(end) + + for bucketID := startingAggrBucket; bucketID <= endAggrBucket; bucketID++ { + buckets = append(buckets, bucketID) + } + + return buckets +} + +// Return the nearest chunk start time for the specified time +func (p *DBPartition) GetChunkMint(t int64) int64 { + if t > p.GetEndTime() { + return p.GetEndTime() - p.chunkInterval + 1 + } + if t < p.GetStartTime() { + return p.startTime + } + return p.chunkInterval * (t / p.chunkInterval) +} + +// Check whether the specified time (t) is within the range of the chunk starting at the specified start time (mint) +func (p *DBPartition) InChunkRange(mint, t int64) bool { + return t >= mint && t < (mint+p.chunkInterval) +} + +// Check whether the specified time (t) is ahead of the range of the chunk starting at the specified start time (mint) +func (p *DBPartition) IsAheadOfChunk(mint, t int64) bool { + return t >= (mint + p.chunkInterval) +} + +// Return the ID of the chunk whose range includes the specified time +func (p *DBPartition) TimeToChunkID(tmilli int64) (int, error) { + if tmilli >= p.startTime && tmilli <= p.GetEndTime() { + return int((tmilli-p.startTime)/p.chunkInterval) + 1, nil + } + return -1, errors.Errorf("Time %d isn't within the range of this partition.", tmilli) +} + +// Check if a chunk (by attribute name) is in the given time range. 
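+// Chunk attribute names have the form "_v<chunkID>"; chunk IDs start at 1 (see TimeToChunkID), so the ID is offset by -1 when computing the chunk's start time.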
+func (p *DBPartition) IsChunkInRangeByAttr(attr string, mint, maxt int64) bool { + + // Discard '_v' prefix + chunkIDStr := attr[2:] + chunkID, err := strconv.ParseInt(chunkIDStr, 10, 64) + if err != nil { + return false + } + + chunkStartTime := p.startTime + (chunkID-1)*p.chunkInterval + chunkEndTime := chunkStartTime + p.chunkInterval - 1 + + return mint <= chunkStartTime && maxt >= chunkEndTime +} + +// Get a chunk's start time by it's attribute name +func (p *DBPartition) GetChunkStartTimeByAttr(attr string) (int64, error) { + + // Discard '_v' prefix + chunkIDStr := attr[2:] + chunkID, err := strconv.ParseInt(chunkIDStr, 10, 64) + if err != nil { + return 0, err + } + + chunkStartTime := p.startTime + (chunkID-1)*p.chunkInterval + + return chunkStartTime, nil +} + +// Check whether the specified time is within the range of this partition +func (p *DBPartition) InRange(t int64) bool { + if p.manager.cyclic { + return true + } + return t >= p.startTime && t <= p.GetEndTime() +} + +// Return the start time (mint) and end time (maxt) for this partition; +// maxt may be required for a cyclic partition +func (p *DBPartition) GetPartitionRange() (int64, int64) { + // Start p.days ago, rounded to next hour + return p.startTime, p.startTime + p.partitionInterval +} + +// Return the attribute name of the given chunk +func (p *DBPartition) ChunkID2Attr(col string, id int) string { + return fmt.Sprintf("_%s%d", col, id) +} + +// Return the attributes that need to be retrieved for the specified time range +func (p *DBPartition) Range2Attrs(col string, mint, maxt int64) ([]string, int64) { + list := p.Range2Cids(mint, maxt) + var strList []string + for _, id := range list { + strList = append(strList, p.ChunkID2Attr(col, id)) + } + + var firstAttrTime int64 + if mint < p.startTime { + firstAttrTime = p.startTime + } else { + firstAttrTime = p.startTime + ((mint-p.startTime)/p.chunkInterval)*p.chunkInterval + } + return strList, firstAttrTime +} + +// Return a list of all the chunk IDs that match the specified time range +func (p *DBPartition) Range2Cids(mint, maxt int64) []int { + var list []int + start, err := p.TimeToChunkID(mint) + if err != nil { + start = 1 + } + end, err := p.TimeToChunkID(maxt) + if err != nil { + end = int(p.partitionInterval / p.chunkInterval) + } + for i := start; i <= end; i++ { + list = append(list, i) + } + return list +} + +func (p *DBPartition) GetHashingBuckets() int { + return p.manager.schemaConfig.TableSchemaInfo.ShardingBucketsCount +} + +func (p *DBPartition) ToMap() map[string]interface{} { + attributes := make(map[string]interface{}, 5) + attributes["aggregates"] = aggregate.MaskToString(p.AggrType()) + attributes["rollupTime"] = p.rollupTime + attributes["chunkInterval"] = p.chunkInterval + attributes["partitionInterval"] = p.partitionInterval + attributes["retentionDays"] = p.retentionDays + return attributes +} + +// Convert a time in milliseconds to day and hour integers +func TimeToDHM(tmilli int64) (int, int) { + t := int(tmilli / 1000) + h := (t / 3600) % 24 + d := t / 3600 / 24 + return d, h +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr_test.go new file mode 100644 index 00000000..f661f65a --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr_test.go @@ -0,0 +1,176 @@ +// +build unit + +/* +Copyright 2018 Iguazio Systems Ltd. 
+ +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package partmgr + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/tsdb/schema" +) + +func TestCreateNewPartition(tst *testing.T) { + manager := getPartitionManager(tst) + interval := manager.currentPartitionInterval + startTime := interval + 1 + + // First partition + part, err := manager.TimeToPart(startTime + interval) + assert.Nil(tst, err, "Failed converting time to a partition.") + if err != nil { + tst.FailNow() + } + assert.Equal(tst, 1, len(manager.partitions)) + assert.Equal(tst, manager.headPartition, part) + + // New head + part, err = manager.TimeToPart(startTime + (interval * 3)) + assert.Nil(tst, err, "Failed converting time to a partition.") + if err != nil { + tst.FailNow() + } + assert.Equal(tst, 2, len(manager.partitions)) + assert.Equal(tst, manager.headPartition, part) + + // Add first + part, err = manager.TimeToPart(startTime) + assert.Nil(tst, err, "Failed converting time to a partition.") + if err != nil { + tst.FailNow() + } + assert.Equal(tst, 3, len(manager.partitions)) + assert.Equal(tst, manager.partitions[0], part) +} + +func getPartitionManager(tst *testing.T) *PartitionManager { + const dummyConfig = `path: "/test"` + v3ioConfig, err := config.GetOrLoadFromData([]byte(dummyConfig)) + if err != nil { + tst.Fatalf("Failed to obtain a TSDB configuration. Error: %v", err) + } + + schm, err := schema.NewSchema(v3ioConfig, "1/s", "1h", "*", "") + if err != nil { + tst.Fatalf("Failed to create a TSDB schema. Error: %v", err) + } + + manager, err := NewPartitionMngr(schm, nil, v3ioConfig) + if err != nil { + tst.Fatalf("Failed to create a partition manager. 
Error: %v", err) + } + + return manager +} + +func TestNewPartitionMngrBadInput(t *testing.T) { + schemaConfig := &config.Schema{ + Partitions: []*config.Partition{{}}, + PartitionSchemaInfo: config.PartitionSchema{ + AggregationGranularity: "boo", + }, + } + v3ioConfig, err := config.GetOrLoadFromStruct(&config.V3ioConfig{}) + assert.NoError(t, err) + _, err = NewPartitionMngr(schemaConfig, nil, v3ioConfig) + assert.Error(t, err) +} + +func TestPartsForRange(tst *testing.T) { + numPartitions := 5 + manager := getPartitionManager(tst) + interval := manager.currentPartitionInterval + for i := 1; i <= numPartitions; i++ { + _, err := manager.TimeToPart(interval * int64(i)) + assert.Nil(tst, err, "Failed converting time to a partition.") + if err != nil { + tst.FailNow() + } + } + assert.Equal(tst, numPartitions, len(manager.partitions)) + // Get all partitions + assert.Equal(tst, manager.partitions, manager.PartsForRange(0, interval*int64(numPartitions+1), true)) + // Get no partitions + assert.Equal(tst, 0, len(manager.PartsForRange(0, interval-1, true)), true) + // Get the first 2 partitions + parts := manager.PartsForRange(0, interval*2+1, true) + assert.Equal(tst, 2, len(parts)) + assert.Equal(tst, manager.partitions[0], parts[0]) + assert.Equal(tst, manager.partitions[1], parts[1]) + // Get the middle 3 partitions + parts = manager.PartsForRange(interval*2, interval*4+1, true) + assert.Equal(tst, 3, len(parts)) + assert.Equal(tst, manager.partitions[1], parts[0]) + assert.Equal(tst, manager.partitions[2], parts[1]) + assert.Equal(tst, manager.partitions[3], parts[2]) + // Get the middle partition by inclusive=false + parts = manager.PartsForRange(interval*2+1, interval*4+1, false) + assert.Equal(tst, 1, len(parts)) + assert.Equal(tst, manager.partitions[2], parts[0]) + // Get the middle partition by inclusive=false + parts = manager.PartsForRange(interval*2-1, interval*4+1, false) + assert.Equal(tst, 2, len(parts)) + assert.Equal(tst, manager.partitions[1], parts[0]) + assert.Equal(tst, manager.partitions[2], parts[1]) +} + +func TestTime2Bucket(tst *testing.T) { + manager := getPartitionManager(tst) + part, _ := manager.TimeToPart(1000000) + assert.Equal(tst, 0, part.Time2Bucket(100)) + assert.Equal(tst, part.rollupBuckets-1, part.Time2Bucket(part.startTime+part.partitionInterval+1)) + assert.Equal(tst, part.rollupBuckets/2, part.Time2Bucket((part.startTime+part.partitionInterval)/2)) +} + +func TestGetChunkMint(tst *testing.T) { + manager := getPartitionManager(tst) + part, err := manager.TimeToPart(manager.currentPartitionInterval) + assert.Nil(tst, err, "Failed converting time to a partition.") + if err != nil { + tst.FailNow() + } + assert.Equal(tst, part.startTime, part.GetChunkMint(0)) + assert.Equal(tst, part.startTime, part.GetChunkMint(part.startTime+1)) + assert.Equal(tst, part.startTime+part.chunkInterval, part.GetChunkMint(part.startTime+part.chunkInterval+100)) + assert.Equal(tst, part.GetEndTime()-part.chunkInterval+1, part.GetChunkMint(part.GetEndTime()+100)) +} + +func TestInRange(tst *testing.T) { + manager := getPartitionManager(tst) + part, _ := manager.TimeToPart(manager.currentPartitionInterval) + assert.Equal(tst, false, part.InRange(part.GetStartTime()-100)) + assert.Equal(tst, false, part.InRange(part.GetEndTime()+100)) + assert.Equal(tst, true, part.InRange(part.GetStartTime()+part.partitionInterval/2)) +} + +func TestRange2Cids(tst *testing.T) { + manager := getPartitionManager(tst) + part, _ := manager.TimeToPart(manager.currentPartitionInterval) + numChunks 
:= int(part.partitionInterval / part.chunkInterval) + var cids []int + for i := 1; i <= numChunks; i++ { + cids = append(cids, i) + } + assert.Equal(tst, cids, part.Range2Cids(0, part.GetEndTime()+100)) + assert.Equal(tst, []int{3, 4, 5}, part.Range2Cids(part.startTime+2*part.chunkInterval, part.startTime+5*part.chunkInterval-1)) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator.go new file mode 100644 index 00000000..c79e7c61 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator.go @@ -0,0 +1,283 @@ +package pquerier + +import ( + "strings" + + "github.com/nuclio/logger" + "github.com/pkg/errors" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +// Chunk-list series iterator +type RawChunkIterator struct { + mint, maxt, aggregationWindow int64 + + chunks []chunkenc.Chunk + encoding chunkenc.Encoding + + chunkIndex int + chunksMax []int64 + iter chunkenc.Iterator + log logger.Logger + + prevT int64 + prevV float64 +} + +func newRawChunkIterator(queryResult *qryResults, log logger.Logger) utils.SeriesIterator { + maxt := queryResult.query.maxt + maxTime := queryResult.fields[config.MaxTimeAttrName] + if maxTime != nil && int64(maxTime.(int)) < maxt { + maxt = int64(maxTime.(int)) + } + + var aggregationWindow int64 + if queryResult.query.aggregationParams != nil { + aggregationWindow = queryResult.query.aggregationParams.GetAggregationWindow() + } + newIterator := RawChunkIterator{ + mint: queryResult.query.mint, + maxt: maxt, + aggregationWindow: aggregationWindow, + log: log.GetChild("rawChunkIterator"), + encoding: queryResult.encoding} + + newIterator.AddChunks(queryResult) + + if len(newIterator.chunks) == 0 { + // If there's no data, create a null iterator + return &utils.NullSeriesIterator{} + } + newIterator.iter = newIterator.chunks[0].Iterator() + return &newIterator +} + +// Advance the iterator to the specified chunk and time +func (it *RawChunkIterator) Seek(t int64) bool { + + // Seek time is after the item's end time (maxt) + if t > it.maxt { + return false + } + + // Seek to the first valid value after t + if t < it.mint-it.aggregationWindow { + t = it.mint - it.aggregationWindow + } + + // Check the first element + t0, _ := it.iter.At() + if t0 > it.maxt { + return false + } + if t <= t0 { + return true + } + + for { + it.updatePrevPoint() + if it.iter.Next() { + t0, _ := it.iter.At() + if t0 > it.maxt { + return false + } + if t > it.chunksMax[it.chunkIndex] { + // This chunk is too far behind; move to the next chunk or + // Return false if it's the last chunk + if it.chunkIndex == len(it.chunks)-1 { + return false + } + it.chunkIndex++ + it.iter = it.chunks[it.chunkIndex].Iterator() + } else if t <= t0 { + // The cursor (t0) is either on t or just passed t + return true + } + } else { + // End of chunk; move to the next chunk or return if last + if it.chunkIndex == len(it.chunks)-1 { + return false + } + + // Free up memory of old chunk + it.chunks[it.chunkIndex] = nil + + it.chunkIndex++ + it.iter = it.chunks[it.chunkIndex].Iterator() + } + } +} + +func (it *RawChunkIterator) updatePrevPoint() { + t, v := it.At() + if !(t == 0 && v == 0) { + it.prevT, it.prevV = t, v + } +} + +// Move to the next iterator item +func (it *RawChunkIterator) Next() bool { + it.updatePrevPoint() + if it.iter.Next() { + t, _ := it.iter.At() + if t < 
it.mint-it.aggregationWindow { + if !it.Seek(it.mint) { + return false + } + t, _ = it.At() + + return t <= it.maxt + } + if t <= it.maxt { + return true + } + return false + } + + if err := it.iter.Err(); err != nil { + return false + } + if it.chunkIndex == len(it.chunks)-1 { + return false + } + + // Free up memory of old chunk + it.chunks[it.chunkIndex] = nil + + it.chunkIndex++ + it.iter = it.chunks[it.chunkIndex].Iterator() + return it.Next() +} + +// Read the time and value at the current location +func (it *RawChunkIterator) At() (t int64, v float64) { return it.iter.At() } + +func (it *RawChunkIterator) AtString() (t int64, v string) { return it.iter.AtString() } + +func (it *RawChunkIterator) Err() error { return it.iter.Err() } + +func (it *RawChunkIterator) Encoding() chunkenc.Encoding { return it.encoding } + +func (it *RawChunkIterator) AddChunks(item *qryResults) { + var chunks []chunkenc.Chunk + var chunksMax []int64 + if item.query.maxt > it.maxt { + it.maxt = item.query.maxt + } + if item.query.mint < it.mint { + it.mint = item.query.mint + } + _, firstChunkTime := item.query.partition.Range2Attrs("v", it.mint, it.maxt) + // Create and initialize a chunk encoder per chunk blob + i := 0 + for _, attr := range item.query.attrs { + + // In case we get both raw chunks and server aggregates, only go over the chunks. + if !strings.Contains(attr, config.AggregateAttrPrefix) { + values := item.fields[attr] + if values != nil { + bytes := values.([]byte) + + chunk, err := chunkenc.FromData(it.log, it.encoding, bytes, 0) + if err != nil { + it.log.ErrorWith("Error reading chunk buffer", "columns", item.query.attrs, "err", err) + } else { + chunks = append(chunks, chunk) + // Calculate the end time for the current chunk + chunksMax = append(chunksMax, + firstChunkTime+int64(i+1)*item.query.partition.TimePerChunk()-1) + } + } + i++ + } + } + + // Add new chunks sorted + if len(chunksMax) != 0 { + if len(it.chunksMax) == 0 || it.chunksMax[len(it.chunksMax)-1] < chunksMax[0] { + it.chunks = append(it.chunks, chunks...) + it.chunksMax = append(it.chunksMax, chunksMax...) + } else { + for i := 0; i < len(it.chunksMax); i++ { + if it.chunksMax[i] > chunksMax[0] { + endChunks := append(chunks, it.chunks[i:]...) + it.chunks = append(it.chunks[:i], endChunks...) + + endMaxChunks := append(chunksMax, it.chunksMax[i:]...) + it.chunksMax = append(it.chunksMax[:i], endMaxChunks...) 
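+ // The new chunks and their end times were spliced in before index i, so both slices stay sorted by ascending chunk end time.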
+ + // If we are inserting a new chunk to the beginning set the current iterator to the new first chunk + if i == 0 { + it.iter = it.chunks[0].Iterator() + } + break + } + } + } + } +} + +func (it *RawChunkIterator) PeakBack() (t int64, v float64) { return it.prevT, it.prevV } + +func NewRawSeries(results *qryResults, logger logger.Logger) (utils.Series, error) { + newSeries := V3ioRawSeries{fields: results.fields, logger: logger, encoding: results.encoding} + err := newSeries.initLabels() + if err != nil { + return nil, err + } + newSeries.iter = newRawChunkIterator(results, logger) + return &newSeries, nil +} + +type V3ioRawSeries struct { + fields map[string]interface{} + lset utils.Labels + iter utils.SeriesIterator + logger logger.Logger + hash uint64 + encoding chunkenc.Encoding +} + +func (s *V3ioRawSeries) Labels() utils.Labels { return s.lset } + +// Get the unique series key for sorting +func (s *V3ioRawSeries) GetKey() uint64 { + if s.hash == 0 { + s.hash = s.lset.Hash() + } + return s.hash +} + +func (s *V3ioRawSeries) Iterator() utils.SeriesIterator { return s.iter } + +func (s *V3ioRawSeries) AddChunks(results *qryResults) { + switch iter := s.iter.(type) { + case *RawChunkIterator: + iter.AddChunks(results) + case *utils.NullSeriesIterator: + s.iter = newRawChunkIterator(results, s.logger) + } +} + +// Initialize the label set from _lset and _name attributes +func (s *V3ioRawSeries) initLabels() error { + name, ok := s.fields[config.MetricNameAttrName].(string) + if !ok { + return errors.Errorf("error in initLabels; bad metric name: %v", s.fields[config.MetricNameAttrName].(string)) + } + lsetAttr, ok := s.fields[config.LabelSetAttrName].(string) + if !ok { + return errors.Errorf("error in initLabels; bad labels set: %v", s.fields[config.LabelSetAttrName].(string)) + } + + lset, err := utils.LabelsFromStringWithName(name, lsetAttr) + + if err != nil { + return errors.Errorf("error in initLabels; failed to parse labels set string: %v. 
err: %v", s.fields[config.LabelSetAttrName].(string), err) + } + + s.lset = lset + return nil +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator_test.go new file mode 100644 index 00000000..28030d2a --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator_test.go @@ -0,0 +1,118 @@ +// +build integration + +package pquerier_test + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +const baseTestTime = int64(1547510400000) // 15/01/2019 00:00:00 + +type testRawChunkIterSuite struct { + suite.Suite + v3ioConfig *config.V3ioConfig + suiteTimestamp int64 +} + +func (suite *testRawChunkIterSuite) SetupSuite() { + v3ioConfig, err := tsdbtest.LoadV3ioConfig() + suite.Require().NoError(err) + + suite.v3ioConfig = v3ioConfig + suite.suiteTimestamp = time.Now().Unix() +} + +func (suite *testRawChunkIterSuite) SetupTest() { + suite.v3ioConfig.TablePath = fmt.Sprintf("%s-%v", suite.T().Name(), suite.suiteTimestamp) + tsdbtest.CreateTestTSDB(suite.T(), suite.v3ioConfig) +} + +func (suite *testRawChunkIterSuite) TearDownTest() { + suite.v3ioConfig.TablePath = fmt.Sprintf("%s-%v", suite.T().Name(), suite.suiteTimestamp) + if !suite.T().Failed() { + tsdbtest.DeleteTSDB(suite.T(), suite.v3ioConfig) + } +} + +func (suite *testRawChunkIterSuite) TestRawChunkIteratorWithZeroValue() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err) + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + ingestData := []tsdbtest.DataPoint{{baseTestTime, 10}, + {baseTestTime + tsdbtest.MinuteInMillis, 0}, + {baseTestTime + 2*tsdbtest.MinuteInMillis, 30}, + {baseTestTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err) + + params, _, _ := pquerier.ParseQuery("select cpu") + params.From = baseTestTime + params.To = baseTestTime + int64(numberOfEvents*eventsInterval) + + set, err := querierV2.Select(params) + suite.Require().NoError(err) + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator().(*pquerier.RawChunkIterator) + + var index int + for iter.Next() { + t, v := iter.At() + prevT, prevV := iter.PeakBack() + + suite.Require().Equal(ingestData[index].Time, t, "current time does not match") + + switch val := ingestData[index].Value.(type) { + case float64: + suite.Require().Equal(val, v, "current value does not match") + case int: + suite.Require().Equal(float64(val), v, "current value does not match") + default: + suite.Require().Equal(val, v, "current value does not match") + } + + if index > 0 { + suite.Require().Equal(ingestData[index-1].Time, prevT, "current time does not match") + switch val := ingestData[index-1].Value.(type) { + case float64: + suite.Require().Equal(val, prevV, "current value does not match") + case int: + suite.Require().Equal(float64(val), prevV, "current 
value does not match") + default: + suite.Require().Equal(val, prevV, "current value does not match") + } + } + index++ + } + } + + suite.Require().Equal(1, seriesCount, "series count didn't match expected") +} + +func TestRawChunkIterSuite(t *testing.T) { + suite.Run(t, new(testRawChunkIterSuite)) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/collector.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/collector.go new file mode 100644 index 00000000..4be0430b --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/collector.go @@ -0,0 +1,373 @@ +package pquerier + +import ( + "encoding/binary" + "math" + + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +/* main query flow logic + +fire GetItems to all partitions and tables + +iterate over results from first to last partition + hash lookup (over labels only w/o name) to find dataFrame + if not found create new dataFrame + based on hash dispatch work to one of the parallel collectors + collectors convert raw/array data to series and aggregate or group + +once collectors are done (wg.done) return SeriesSet (prom compatible) or FrameSet (iguazio column interface) + final aggregators (Avg, Stddav/var, ..) are formed from raw aggr in flight via iterators + - Series: have a single name and optional aggregator per time series, values limited to Float64 + - Frames: have index/time column(s) and multiple named/typed value columns (one per metric name * function) + +** optionally can return SeriesSet (from dataFrames) to Prom immediately after we completed GetItems iterators + and block (wg.done) the first time Prom tries to access the SeriesIter data (can lower latency) + + if result set needs to be ordered we can also sort the dataFrames based on Labels data (e.g. order-by username) + in parallel to having all the time series data processed by the collectors + +*/ + +/* collector logic: + +- get qryResults from chan + +- if raw query + if first partition + create series + else + append chunks to existing series + +- if vector query (results are bucketed over time or grouped by) + if first partition + create & init array per function (and per name) based on query metadata/results + + init child raw-chunk or attribute iterators + iterate over data and fill bucketed arrays + if missing time or data use interpolation + +- if got fin message and processed last item + use sync waitgroup to signal the main that the go routines are done + will allow main flow to continue and serve the results, no locks are required + +*/ + +// Main collector which processes query results from a channel and then dispatches them according to query type. 
+// Query types: raw data, server-side aggregates, client-side aggregates +func mainCollector(ctx *selectQueryContext, responseChannel chan *qryResults) { + defer ctx.wg.Done() + + lastTimePerMetric := make(map[uint64]int64, len(ctx.columnsSpecByMetric)) + lastValuePerMetric := make(map[uint64]float64, len(ctx.columnsSpecByMetric)) + + for { + select { + case _ = <-ctx.stopChan: + return + case res, ok := <-responseChannel: + if !ok { + return + } + if res.IsRawQuery() { + err := rawCollector(ctx, res) + if err != nil { + ctx.errorChannel <- err + return + } + } else { + err := res.frame.addMetricIfNotExist(res.name, ctx.getResultBucketsSize(), res.IsServerAggregates()) + if err != nil { + ctx.logger.Error("problem adding new metric '%v', lset: %v, err:%v", res.name, res.frame.lset, err) + ctx.errorChannel <- err + return + } + lsetAttr, _ := res.fields[config.LabelSetAttrName].(string) + lset, _ := utils.LabelsFromString(lsetAttr) + lset = append(lset, utils.Label{Name: config.MetricNameAttrName, Value: res.name}) + currentResultHash := lset.Hash() + + // Aggregating cross series aggregates, only supported over raw data. + if ctx.isCrossSeriesAggregate { + lastTimePerMetric[currentResultHash], lastValuePerMetric[currentResultHash], _ = aggregateClientAggregatesCrossSeries(ctx, res, lastTimePerMetric[currentResultHash], lastValuePerMetric[currentResultHash]) + } else { + // Aggregating over time aggregates + if res.IsServerAggregates() { + aggregateServerAggregates(ctx, res) + } else if res.IsClientAggregates() { + aggregateClientAggregates(ctx, res) + } + } + + // It is possible to query an aggregate and down sample raw chunks in the same df. + if res.IsDownsample() { + lastTimePerMetric[currentResultHash], lastValuePerMetric[currentResultHash], err = downsampleRawData(ctx, res, lastTimePerMetric[currentResultHash], lastValuePerMetric[currentResultHash]) + if err != nil { + ctx.logger.Error("problem downsampling '%v', lset: %v, err:%v", res.name, res.frame.lset, err) + ctx.errorChannel <- err + return + } + } + } + } + } +} + +func rawCollector(ctx *selectQueryContext, res *qryResults) error { + ctx.logger.Debug("using Raw Collector for metric %v", res.name) + + if res.frame.isWildcardSelect { + columnIndex, ok := res.frame.columnByName[res.name] + if ok { + res.frame.rawColumns[columnIndex].(*V3ioRawSeries).AddChunks(res) + } else { + series, err := NewRawSeries(res, ctx.logger.GetChild("v3ioRawSeries")) + if err != nil { + return err + } + res.frame.rawColumns = append(res.frame.rawColumns, series) + res.frame.columnByName[res.name] = len(res.frame.rawColumns) - 1 + } + } else { + columnIndex := res.frame.columnByName[res.name] + rawColumn := res.frame.rawColumns[columnIndex] + if rawColumn != nil { + res.frame.rawColumns[columnIndex].(*V3ioRawSeries).AddChunks(res) + } else { + series, err := NewRawSeries(res, ctx.logger.GetChild("v3ioRawSeries")) + if err != nil { + return err + } + res.frame.rawColumns[columnIndex] = series + } + } + return nil +} + +func aggregateClientAggregates(ctx *selectQueryContext, res *qryResults) { + ctx.logger.Debug("using Client Aggregates Collector for metric %v", res.name) + it := newRawChunkIterator(res, ctx.logger) + for it.Next() { + t, v := it.At() + + if res.query.aggregationParams.HasAggregationWindow() { + windowAggregation(ctx, res, t, v) + } else { + intervalAggregation(ctx, res, t, v) + } + } +} + +func aggregateServerAggregates(ctx *selectQueryContext, res *qryResults) { + ctx.logger.Debug("using Server Aggregates Collector for metric %v", 
res.name) + + partitionStartTime := res.query.partition.GetStartTime() + rollupInterval := res.query.aggregationParams.GetRollupTime() + for _, col := range res.frame.columns { + if col.GetColumnSpec().metric == res.name && + aggregate.HasAggregates(col.GetColumnSpec().function) && + col.GetColumnSpec().isConcrete() { + + array, ok := res.fields[aggregate.ToAttrName(col.GetColumnSpec().function)] + if !ok { + ctx.logger.Warn("requested function %v was not found in response", col.GetColumnSpec().function) + } else { + // go over the byte array and convert each uint as we go to save memory allocation + bytes := array.([]byte) + + for i := 16; i+8 <= len(bytes); i += 8 { + val := binary.LittleEndian.Uint64(bytes[i : i+8]) + currentValueIndex := (i - 16) / 8 + + // Calculate server side aggregate bucket by its median time + currentValueTime := partitionStartTime + int64(currentValueIndex)*rollupInterval + rollupInterval/2 + currentCell := (currentValueTime - ctx.queryParams.From) / res.query.aggregationParams.Interval + + var floatVal float64 + if aggregate.IsCountAggregate(col.GetColumnSpec().function) { + floatVal = float64(val) + } else { + floatVal = math.Float64frombits(val) + } + + bottomMargin := res.query.aggregationParams.Interval + if res.query.aggregationParams.HasAggregationWindow() { + bottomMargin = res.query.aggregationParams.GetAggregationWindow() + } + if currentValueTime >= ctx.queryParams.From-bottomMargin && currentValueTime <= ctx.queryParams.To+res.query.aggregationParams.Interval { + if !res.query.aggregationParams.HasAggregationWindow() { + _ = res.frame.setDataAt(col.Name(), int(currentCell), floatVal) + } else { + windowAggregationWithServerAggregates(ctx, res, col, currentValueTime, floatVal) + } + } + } + } + } + } +} + +func downsampleRawData(ctx *selectQueryContext, res *qryResults, + previousPartitionLastTime int64, previousPartitionLastValue float64) (int64, float64, error) { + ctx.logger.Debug("using Downsample Collector for metric %v", res.name) + + it, ok := newRawChunkIterator(res, ctx.logger).(*RawChunkIterator) + if !ok { + return previousPartitionLastTime, previousPartitionLastValue, nil + } + col, err := res.frame.Column(res.name) + if err != nil { + return previousPartitionLastTime, previousPartitionLastValue, err + } + for currCell := 0; currCell < col.Len(); currCell++ { + currCellTime := int64(currCell)*ctx.queryParams.Step + ctx.queryParams.From + prev, err := col.getBuilder().At(currCell) + + // Only update a cell if it hasn't been set yet + if prev == nil || err != nil { + if it.Seek(currCellTime) { + t, v := it.At() + if t == currCellTime { + _ = res.frame.setDataAt(col.Name(), currCell, v) + } else { + prevT, prevV := it.PeakBack() + + // In case it's the first point in the partition use the last point of the previous partition for the interpolation + if prevT == 0 { + prevT = previousPartitionLastTime + prevV = previousPartitionLastValue + } + interpolatedT, interpolatedV := col.GetInterpolationFunction()(prevT, t, currCellTime, prevV, v) + + // Check if the interpolation was successful in terms of exceeding tolerance + if !(interpolatedT == 0 && interpolatedV == 0) { + _ = res.frame.setDataAt(col.Name(), currCell, interpolatedV) + } + } + } + } + } + + lastT, lastV := it.At() + return lastT, lastV, nil +} + +func aggregateClientAggregatesCrossSeries(ctx *selectQueryContext, res *qryResults, previousPartitionLastTime int64, previousPartitionLastValue float64) (int64, float64, error) { + ctx.logger.Debug("using Client Aggregates Collector for 
metric %v", res.name) + it, ok := newRawChunkIterator(res, ctx.logger).(*RawChunkIterator) + if !ok { + return previousPartitionLastTime, previousPartitionLastValue, nil + } + + var previousPartitionEndBucket int + if previousPartitionLastTime != 0 { + previousPartitionEndBucket = int((previousPartitionLastTime-ctx.queryParams.From)/ctx.queryParams.Step) + 1 + } + maxBucketForPartition := int((res.query.partition.GetEndTime() - ctx.queryParams.From) / ctx.queryParams.Step) + if maxBucketForPartition > ctx.getResultBucketsSize() { + maxBucketForPartition = ctx.getResultBucketsSize() + } + + for currBucket := previousPartitionEndBucket; currBucket < maxBucketForPartition; currBucket++ { + currBucketTime := int64(currBucket)*ctx.queryParams.Step + ctx.queryParams.From + + if it.Seek(currBucketTime) { + t, v := it.At() + if t == currBucketTime { + for _, col := range res.frame.columns { + if col.GetColumnSpec().metric == res.name { + _ = res.frame.setDataAt(col.Name(), currBucket, v) + } + } + } else { + prevT, prevV := it.PeakBack() + + // In case it's the first point in the partition use the last point of the previous partition for the interpolation + if prevT == 0 { + prevT = previousPartitionLastTime + prevV = previousPartitionLastValue + } + + for _, col := range res.frame.columns { + if col.GetColumnSpec().metric == res.name { + interpolatedT, interpolatedV := col.GetInterpolationFunction()(prevT, t, currBucketTime, prevV, v) + if !(interpolatedT == 0 && interpolatedV == 0) { + _ = res.frame.setDataAt(col.Name(), currBucket, interpolatedV) + } + } + } + } + } else { + break + } + } + + lastT, lastV := it.At() + return lastT, lastV, nil +} + +func intervalAggregation(ctx *selectQueryContext, res *qryResults, t int64, v float64) { + currentCell := getRelativeCell(t, ctx.queryParams.From, res.query.aggregationParams.Interval, false) + aggregateAllColumns(res, currentCell, v) +} + +func windowAggregation(ctx *selectQueryContext, res *qryResults, t int64, v float64) { + currentCell := getRelativeCell(t, ctx.queryParams.From, res.query.aggregationParams.Interval, true) + aggregationWindow := res.query.aggregationParams.GetAggregationWindow() + + if aggregationWindow > res.query.aggregationParams.Interval { + currentCellTime := ctx.queryParams.From + currentCell*res.query.aggregationParams.Interval + maximumAffectedTime := t + aggregationWindow + numAffectedCells := (maximumAffectedTime-currentCellTime)/res.query.aggregationParams.Interval + 1 // +1 to include the current cell + + for i := int64(0); i < numAffectedCells; i++ { + aggregateAllColumns(res, currentCell+i, v) + } + } else if aggregationWindow < res.query.aggregationParams.Interval { + if t+aggregationWindow >= ctx.queryParams.From+currentCell*res.query.aggregationParams.Interval { + aggregateAllColumns(res, currentCell, v) + } + } else { + aggregateAllColumns(res, currentCell, v) + } +} + +func windowAggregationWithServerAggregates(ctx *selectQueryContext, res *qryResults, column Column, t int64, v float64) { + currentCell := getRelativeCell(t, ctx.queryParams.From, res.query.aggregationParams.Interval, true) + + aggregationWindow := res.query.aggregationParams.GetAggregationWindow() + if aggregationWindow > res.query.aggregationParams.Interval { + currentCellTime := ctx.queryParams.From + currentCell*res.query.aggregationParams.Interval + maxAffectedTime := t + aggregationWindow + numAffectedCells := (maxAffectedTime-currentCellTime)/res.query.aggregationParams.Interval + 1 // +1 to include the current cell + + for i := int64(0); i 
< numAffectedCells; i++ { + _ = res.frame.setDataAt(column.Name(), int(currentCell+i), v) + } + } else { + _ = res.frame.setDataAt(column.Name(), int(currentCell), v) + } +} + +func getRelativeCell(time, beginning, interval int64, roundUp bool) int64 { + cell := (time - beginning) / interval + + if roundUp && (time-beginning)%interval > 0 { + cell++ + } + + return cell +} + +// Set data to all aggregated columns for the given metric +func aggregateAllColumns(res *qryResults, cell int64, value float64) { + for _, col := range res.frame.columns { + colSpec := col.GetColumnSpec() + if colSpec.metric == res.name && colSpec.function != 0 { + _ = res.frame.setDataAt(col.Name(), int(cell), value) + } + } +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/frames.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/frames.go new file mode 100644 index 00000000..a903122f --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/frames.go @@ -0,0 +1,864 @@ +package pquerier + +import ( + "fmt" + "math" + "time" + + "github.com/pkg/errors" + "github.com/v3io/frames" + "github.com/v3io/frames/pb" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type frameIterator struct { + ctx *selectQueryContext + setIndex int + seriesIndex int + columnNum int + err error +} + +// create new frame set iterator, frame iter has a SeriesSet interface (for Prometheus) plus columnar interfaces +func newFrameIterator(ctx *selectQueryContext) (*frameIterator, error) { + if !ctx.isRawQuery() { + for _, f := range ctx.frameList { + if err := f.finishAllColumns(); err != nil { + return nil, errors.Wrapf(err, "failed to create columns for DF=%v", f.Labels()) + } + } + } + + return &frameIterator{ctx: ctx, columnNum: ctx.totalColumns, setIndex: 0, seriesIndex: -1}, nil +} + +// advance to the next data frame +func (fi *frameIterator) NextFrame() bool { + fi.setIndex++ + return fi.setIndex-1 < len(fi.ctx.frameList) +} + +// get current data frame +func (fi *frameIterator) GetFrame() (frames.Frame, error) { + return fi.ctx.frameList[fi.setIndex-1].GetFrame() +} + +// advance to the next time series (for Prometheus mode) +func (fi *frameIterator) Next() bool { + + var numberOfColumnsInCurrentSeries int + if len(fi.ctx.frameList) > 0 { + numberOfColumnsInCurrentSeries = len(fi.ctx.frameList[fi.setIndex].columnByName) + } + + if fi.seriesIndex < numberOfColumnsInCurrentSeries-1 { + // can advance series within a frame + fi.seriesIndex++ + } else if fi.setIndex+1 >= len(fi.ctx.frameList) { + // already in the last column in the last frame + return false + } else { + // advance to next frame + fi.setIndex++ + fi.seriesIndex = 0 + } + + if fi.isCurrentSeriesHidden() { + return fi.Next() + } + + series := fi.ctx.frameList[fi.setIndex] + // If raw series is nil + if series.isRawSeries && series.rawColumns[fi.seriesIndex] == nil { + return fi.Next() + } + + return true +} + +// get current time series (for Prometheus mode) +func (fi *frameIterator) At() utils.Series { + s, err := fi.ctx.frameList[fi.setIndex].TimeSeries(fi.seriesIndex) + if err != nil { + fi.err = err + } + return s +} + +func (fi *frameIterator) isCurrentSeriesHidden() bool { + if fi.ctx.isRawQuery() { + return false + } + col, err := fi.ctx.frameList[fi.setIndex].ColumnAt(fi.seriesIndex) + if err != nil { + fi.err = err + } + + return col.GetColumnSpec().isHidden +} + +func (fi 
*frameIterator) Err() error { + return fi.err +} + +// data frame, holds multiple value columns and an index (time) column +func newDataFrame(columnsSpec []columnMeta, indexColumn Column, lset utils.Labels, hash uint64, isRawQuery bool, columnSize int, useServerAggregates, showAggregateLabel bool) (*dataFrame, error) { + df := &dataFrame{lset: lset, hash: hash, isRawSeries: isRawQuery, showAggregateLabel: showAggregateLabel} + // is raw query + if isRawQuery { + df.columnByName = make(map[string]int, len(columnsSpec)) + + // Create the columns in the DF based on the requested columns order. + for i, col := range columnsSpec { + if col.metric == "" { + df.isWildcardSelect = true + break + } + df.columnByName[col.getColumnName()] = i + } + + // If no specific order was requested (like when querying for all metrics), + // discard order and reset columns for future initialization. + if df.isWildcardSelect { + df.columnByName = make(map[string]int, len(columnsSpec)) + df.rawColumns = []utils.Series{} + } else { + // Initialize `rawcolumns` to the requested size. + df.rawColumns = make([]utils.Series, len(columnsSpec)) + } + } else { + numOfColumns := len(columnsSpec) + df.index = indexColumn + df.columnByName = make(map[string]int, numOfColumns) + df.columns = make([]Column, 0, numOfColumns) + df.metricToCountColumn = map[string]Column{} + df.metrics = map[string]struct{}{} + df.nonEmptyRowsIndicators = make([]bool, columnSize) + + i := 0 + for _, col := range columnsSpec { + // In case user wanted all metrics, save the template for every metric. + // Once we know what metrics we have we will create Columns out of the column Templates + if col.isWildcard() { + df.columnsTemplates = append(df.columnsTemplates, col) + } else { + column, err := createColumn(col, columnSize, useServerAggregates) + if err != nil { + return nil, err + } + if aggregate.IsCountAggregate(col.function) { + df.metricToCountColumn[col.metric] = column + } + df.columns = append(df.columns, column) + df.columnByName[col.getColumnName()] = i + i++ + } + } + for _, col := range df.columns { + if !col.GetColumnSpec().isConcrete() { + fillDependantColumns(col, df) + } + } + } + + return df, nil +} + +func createColumn(col columnMeta, columnSize int, useServerAggregates bool) (Column, error) { + var column Column + if col.function != 0 { + if col.isConcrete() { + function, err := getAggreagteFunction(col.function, useServerAggregates) + if err != nil { + return nil, err + } + column = NewConcreteColumn(col.getColumnName(), col, columnSize, function) + } else { + function, err := getVirtualColumnFunction(col.function) + if err != nil { + return nil, err + } + + column = NewVirtualColumn(col.getColumnName(), col, columnSize, function) + } + } else { + column = newDataColumn(col.getColumnName(), col, columnSize, frames.FloatType) + } + + return column, nil +} + +func getAggreagteFunction(aggrType aggregate.AggrType, useServerAggregates bool) (func(interface{}, interface{}) interface{}, error) { + if useServerAggregates { + return aggregate.GetServerAggregationsFunction(aggrType) + } + return aggregate.GetClientAggregationsFunction(aggrType) +} + +func fillDependantColumns(wantedColumn Column, df *dataFrame) { + wantedAggregations := aggregate.GetDependantAggregates(wantedColumn.GetColumnSpec().function) + var columns []Column + + // Order of the dependent columns should be the same as `wantedAggregations`. 
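+ // This keeps a virtual column (such as avg) evaluating its concrete dependencies in a deterministic order when finish() computes it.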
+ for _, agg := range wantedAggregations { + for _, col := range df.columns { + if col.GetColumnSpec().metric == wantedColumn.GetColumnSpec().metric && + agg == col.GetColumnSpec().function { + columns = append(columns, col) + } + } + } + wantedColumn.(*virtualColumn).dependantColumns = columns +} + +func getVirtualColumnFunction(aggrType aggregate.AggrType) (func([]Column, int) (interface{}, error), error) { + function, err := aggregate.GetServerVirtualAggregationFunction(aggrType) + if err != nil { + return nil, err + } + return func(columns []Column, index int) (interface{}, error) { + data := make([]float64, len(columns)) + for i, c := range columns { + v, err := c.FloatAt(index) + if err != nil { + return nil, err + } + + data[i] = v + } + return function(data), nil + }, nil +} + +type dataFrame struct { + lset utils.Labels + hash uint64 + showAggregateLabel bool + + isRawSeries bool + isRawColumnsGenerated bool + rawColumns []utils.Series + + columnsTemplates []columnMeta + columns []Column + index Column + columnByName map[string]int // name -> index in columns + nonEmptyRowsIndicators []bool + nullValuesMaps []*pb.NullValuesMap + + metrics map[string]struct{} + metricToCountColumn map[string]Column + + isWildcardSelect bool +} + +func (d *dataFrame) addMetricIfNotExist(metricName string, columnSize int, useServerAggregates bool) error { + if _, ok := d.metrics[metricName]; !ok { + return d.addMetricFromTemplate(metricName, columnSize, useServerAggregates) + } + return nil +} + +func (d *dataFrame) addMetricFromTemplate(metricName string, columnSize int, useServerAggregates bool) error { + var newColumns []Column + for _, col := range d.columnsTemplates { + col.metric = metricName + newCol, err := createColumn(col, columnSize, useServerAggregates) + if err != nil { + return err + } + + // Make sure there is only 1 count column per metric. + // Count is the only column we automatically add so in some cases we get multiple count columns in the templates. + _, ok := d.metricToCountColumn[metricName] + if !aggregate.IsCountAggregate(col.function) || !ok { + newColumns = append(newColumns, newCol) + } + if aggregate.IsCountAggregate(col.function) && !ok { + d.metricToCountColumn[metricName] = newCol + } + } + + numberOfColumns := len(d.columns) + d.columns = append(d.columns, newColumns...) 
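+ // Register each new column by name and wire any virtual columns to their concrete dependencies.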
+ for i, col := range newColumns { + d.columnByName[col.GetColumnSpec().getColumnName()] = numberOfColumns + i + if !col.GetColumnSpec().isConcrete() { + fillDependantColumns(col, d) + } + } + d.metrics[metricName] = struct{}{} + return nil +} + +func (d *dataFrame) setDataAt(columnName string, index int, value interface{}) error { + colIndex, ok := d.columnByName[columnName] + if !ok { + return fmt.Errorf("no such column %v", columnName) + } + col := d.columns[colIndex] + err := col.SetDataAt(index, value) + if err == nil { + d.nonEmptyRowsIndicators[index] = true + } + + return err +} + +func (d *dataFrame) Len() int { + if d.isRawSeries { + return len(d.rawColumns) + } + return len(d.columns) +} + +func (d *dataFrame) Labels() utils.Labels { + return d.lset +} + +func (d *dataFrame) Names() []string { + names := make([]string, d.Len()) + + for i := 0; i < d.Len(); i++ { + names[i] = d.columns[i].Name() + } + + return names +} + +func (d *dataFrame) ColumnAt(i int) (Column, error) { + if i >= d.Len() { + return nil, fmt.Errorf("index %d out of bounds [0:%d]", i, d.Len()) + } + if d.shouldGenerateRawColumns() { + err := d.rawSeriesToColumns() + if err != nil { + return nil, err + } + } + return d.columns[i], nil +} + +func (d *dataFrame) Columns() ([]Column, error) { + if d.shouldGenerateRawColumns() { + err := d.rawSeriesToColumns() + if err != nil { + return nil, err + } + } + return d.columns, nil +} + +func (d *dataFrame) Column(name string) (Column, error) { + if d.shouldGenerateRawColumns() { + err := d.rawSeriesToColumns() + if err != nil { + return nil, err + } + } + i, ok := d.columnByName[name] + if !ok { + return nil, fmt.Errorf("column %q not found", name) + } + + return d.columns[i], nil +} + +func (d *dataFrame) Index() (Column, error) { + if d.shouldGenerateRawColumns() { + err := d.rawSeriesToColumns() + if err != nil { + return nil, err + } + } + return d.index, nil +} + +func (d *dataFrame) TimeSeries(i int) (utils.Series, error) { + if d.isRawSeries { + return d.rawColumns[i], nil + } + currentColumn, err := d.ColumnAt(i) + if err != nil { + return nil, err + } + + return NewDataFrameColumnSeries(d.index, + currentColumn, + d.metricToCountColumn[currentColumn.GetColumnSpec().metric], + d.Labels(), + d.hash, + d.showAggregateLabel), nil +} + +// Creates Frames.columns out of tsdb columns. +// First do all the concrete columns and then the virtual who are dependant on the concrete. +func (d *dataFrame) finishAllColumns() error { + // Marking as deleted every index (row) that has no data. + // Also, adding "blank" rows when needed to align all columns to the same time. + // Iterating backwards to not miss any deleted cell. 
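+ // Cells that one column populated but another did not are padded with NaN so that all columns end up the same length.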
+ for i := len(d.nonEmptyRowsIndicators) - 1; i >= 0; i-- { + hasData := d.nonEmptyRowsIndicators[i] + if !hasData { + for _, col := range d.columns { + _ = col.Delete(i) + } + _ = d.index.Delete(i) + } else { + for _, col := range d.columns { + switch col.(type) { + case *ConcreteColumn, *dataColumn: + value, err := col.getBuilder().At(i) + if err != nil || value == nil { + err := col.getBuilder().Set(i, math.NaN()) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("could not create new column at index %d", i)) + } + } + } + } + } + } + + var columnSize int + var err error + for _, col := range d.columns { + switch col.(type) { + case *dataColumn: + err = col.finish() + case *ConcreteColumn: + err = col.finish() + if columnSize == 0 { + columnSize = col.FramesColumn().Len() + } else if columnSize != col.FramesColumn().Len() { + return fmt.Errorf("column length mismatch %v!=%v col=%v", columnSize, col.FramesColumn().Len(), col.Name()) + } + } + if err != nil { + return errors.Wrapf(err, "failed to create column '%v'", col.Name()) + } + } + for _, col := range d.columns { + switch newCol := col.(type) { + case *virtualColumn: + newCol.size = columnSize + err = col.finish() + } + if err != nil { + return errors.Wrapf(err, "failed to create column '%v'", col.Name()) + } + } + + err = d.index.finish() + + return err +} + +// Normalizing the raw data of different metrics to one timeline with both metric's times. +// +// for example the following time series: +// metric1 - (t0,v0), (t2, v1) +// metric2 - (t1,v2), (t2, v3) +// +// will be converted to: +// time metric1 metric2 +// t0 v0 NaN +// t1 NaN v2 +// t2 v1 v3 +// +func (d *dataFrame) rawSeriesToColumns() error { + var timeData []time.Time + var currentTime int64 + numberOfRawColumns := len(d.rawColumns) + columns := make([]frames.ColumnBuilder, numberOfRawColumns) + nonExhaustedIterators := numberOfRawColumns + seriesToDataType := make([]frames.DType, numberOfRawColumns) + seriesToDefaultValue := make([]interface{}, numberOfRawColumns) + nextTime := int64(math.MaxInt64) + seriesHasMoreData := make([]bool, numberOfRawColumns) + emptyMetrics := make(map[int]string) + + d.nullValuesMaps = make([]*pb.NullValuesMap, 0) + nullValuesRowIndex := -1 + + for i, rawSeries := range d.rawColumns { + if rawSeries == nil { + missingColumn := "(unknown column)" + for columnName, index := range d.columnByName { + if index == i { + missingColumn = columnName + break + } + } + emptyMetrics[i] = missingColumn + nonExhaustedIterators-- + continue + } + if rawSeries.Iterator().Next() { + seriesHasMoreData[i] = true + t, _ := rawSeries.Iterator().At() + if t < nextTime { + nextTime = t + } + } else { + nonExhaustedIterators-- + } + + currentEnc := chunkenc.EncXOR + if ser, ok := rawSeries.(*V3ioRawSeries); ok { + currentEnc = ser.encoding + } + + if currentEnc == chunkenc.EncVariant { + columns[i] = frames.NewSliceColumnBuilder(rawSeries.Labels().Get(config.PrometheusMetricNameAttribute), + frames.StringType, 0) + seriesToDataType[i] = frames.StringType + seriesToDefaultValue[i] = "" + } else { + columns[i] = frames.NewSliceColumnBuilder(rawSeries.Labels().Get(config.PrometheusMetricNameAttribute), + frames.FloatType, 0) + seriesToDataType[i] = frames.FloatType + seriesToDefaultValue[i] = math.NaN() + } + } + + for nonExhaustedIterators > 0 { + currentTime = nextTime + nextTime = int64(math.MaxInt64) + timeData = append(timeData, time.Unix(currentTime/1000, (currentTime%1000)*1e6)) + + // add new row to null values map + d.nullValuesMaps = 
append(d.nullValuesMaps, &pb.NullValuesMap{NullColumns: make(map[string]bool)}) + nullValuesRowIndex++ + + for seriesIndex, rawSeries := range d.rawColumns { + if rawSeries == nil { + continue + } + iter := rawSeries.Iterator() + + var v interface{} + var t int64 + + if seriesToDataType[seriesIndex] == frames.StringType { + t, v = iter.AtString() + } else { + t, v = iter.At() + } + + if t == currentTime { + e := columns[seriesIndex].Append(v) + if e != nil { + return errors.Wrap(e, fmt.Sprintf("could not append value %v", v)) + } + if iter.Next() { + t, _ = iter.At() + } else { + nonExhaustedIterators-- + seriesHasMoreData[seriesIndex] = false + } + } else { + e := columns[seriesIndex].Append(seriesToDefaultValue[seriesIndex]) + if e != nil { + return errors.Wrap(e, fmt.Sprintf("could not append from default value %v", seriesToDefaultValue[seriesIndex])) + } + d.nullValuesMaps[nullValuesRowIndex].NullColumns[columns[seriesIndex].Name()] = true + } + + if seriesHasMoreData[seriesIndex] && t < nextTime { + nextTime = t + } + } + } + + numberOfRows := len(timeData) + colSpec := columnMeta{metric: "time"} + d.index = newDataColumn("time", colSpec, numberOfRows, frames.TimeType) + e := d.index.SetData(timeData, numberOfRows) + if e != nil { + return errors.Wrap(e, fmt.Sprintf("could not set data, timeData=%v, numberOfRows=%v", timeData, numberOfRows)) + } + + d.columns = make([]Column, numberOfRawColumns) + + for i, series := range d.rawColumns { + if series == nil { + continue + } + + name := series.Labels().Get(config.PrometheusMetricNameAttribute) + spec := columnMeta{metric: name} + col := newDataColumn(name, spec, numberOfRows, seriesToDataType[i]) + col.framesCol = columns[i].Finish() + d.columns[i] = col + } + + if len(emptyMetrics) > 0 { + nullValues := make([]float64, numberOfRows) + for i := 0; i < numberOfRows; i++ { + nullValues[i] = math.NaN() + } + for index, metricName := range emptyMetrics { + spec := columnMeta{metric: metricName} + col := newDataColumn(metricName, spec, numberOfRows, frames.FloatType) + framesCol, err := frames.NewSliceColumn(metricName, nullValues) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("could not create empty column '%v'", metricName)) + } + col.framesCol = framesCol + d.columns[index] = col + + // mark empty columns + for i := 0; i < numberOfRows; i++ { + d.nullValuesMaps[i].NullColumns[col.name] = true + } + } + } + + d.isRawColumnsGenerated = true + + return nil +} + +func (d *dataFrame) shouldGenerateRawColumns() bool { return d.isRawSeries && !d.isRawColumnsGenerated } + +func (d *dataFrame) GetFrame() (frames.Frame, error) { + var framesColumns []frames.Column + if d.shouldGenerateRawColumns() { + err := d.rawSeriesToColumns() + if err != nil { + return nil, err + } + } + for _, col := range d.columns { + if !col.GetColumnSpec().isHidden { + framesColumns = append(framesColumns, col.FramesColumn()) + } + } + + return frames.NewFrameWithNullValues(framesColumns, []frames.Column{d.index.FramesColumn()}, d.Labels().Map(), d.nullValuesMaps) +} + +// Column object, store a single value or index column/array +// There can be data columns or calculated columns (e.g. Avg built from count & sum columns) + +// Column is a data column +type Column interface { + Len() int // Number of elements + Name() string // Column name + DType() frames.DType // Data type (e.g. IntType, FloatType ...) 
+ FloatAt(i int) (float64, error) // Float value at index i + StringAt(i int) (string, error) // String value at index i + TimeAt(i int) (time.Time, error) // time value at index i + GetColumnSpec() columnMeta // Get the column's metadata + SetDataAt(i int, value interface{}) error + SetData(d interface{}, size int) error + GetInterpolationFunction() InterpolationFunction + FramesColumn() frames.Column + Delete(index int) error + + setMetricName(name string) + getBuilder() frames.ColumnBuilder + finish() error +} + +type basicColumn struct { + name string + size int + spec columnMeta + interpolationFunction InterpolationFunction + builder frames.ColumnBuilder + framesCol frames.Column +} + +func (c *basicColumn) getBuilder() frames.ColumnBuilder { + return c.builder +} + +func (c *basicColumn) finish() error { + c.framesCol = c.builder.Finish() + return nil +} + +func (c *basicColumn) Delete(index int) error { + return c.builder.Delete(index) +} + +func (c *basicColumn) FramesColumn() frames.Column { + return c.framesCol +} + +// Name returns the column name +func (c *basicColumn) Name() string { + return c.name +} + +// Len returns the number of elements +func (c *basicColumn) Len() int { + if c.framesCol != nil { + return c.framesCol.Len() + } + return c.size +} + +func (c *basicColumn) isValidIndex(i int) bool { return i >= 0 && i < c.size } + +func (c *basicColumn) GetColumnSpec() columnMeta { return c.spec } + +func (c *basicColumn) setMetricName(name string) { + c.spec.metric = name + c.name = c.spec.getColumnName() +} + +func (c *basicColumn) SetDataAt(i int, value interface{}) error { + if !c.isValidIndex(i) { + return fmt.Errorf("index %d out of bounds [0:%d]", i, c.size) + } + return nil +} + +func (c *basicColumn) SetData(d interface{}, size int) error { + return errors.New("method not supported") +} +func (c *basicColumn) GetInterpolationFunction() InterpolationFunction { + return c.interpolationFunction +} + +func newDataColumn(name string, colSpec columnMeta, size int, datatype frames.DType) *dataColumn { + dc := &dataColumn{basicColumn: basicColumn{name: name, spec: colSpec, size: size, + interpolationFunction: GetInterpolateFunc(colSpec.interpolationType, colSpec.interpolationTolerance), + builder: frames.NewSliceColumnBuilder(name, datatype, size)}} + return dc + +} + +type dataColumn struct { + basicColumn +} + +// DType returns the data type +func (dc *dataColumn) DType() frames.DType { + return dc.framesCol.DType() +} + +// FloatAt returns float64 value at index i +func (dc *dataColumn) FloatAt(i int) (float64, error) { + return dc.framesCol.FloatAt(i) +} + +// StringAt returns string value at index i +func (dc *dataColumn) StringAt(i int) (string, error) { + return dc.framesCol.StringAt(i) +} + +// TimeAt returns time.Time value at index i +func (dc *dataColumn) TimeAt(i int) (time.Time, error) { + return dc.framesCol.TimeAt(i) +} + +func (dc *dataColumn) SetData(d interface{}, size int) error { + dc.size = size + var err error + dc.framesCol, err = frames.NewSliceColumn(dc.name, d) + return err +} + +func (dc *dataColumn) SetDataAt(i int, value interface{}) error { + if !dc.isValidIndex(i) { + return fmt.Errorf("index %d out of bounds [0:%d]", i, dc.size) + } + + var err error + switch value.(type) { + case float64: + // Update requested cell, only if not trying to override an existing value with NaN + prev, _ := dc.builder.At(i) + if !(math.IsNaN(value.(float64)) && prev != nil && !math.IsNaN(prev.(float64))) { + err = dc.builder.Set(i, value) + } + default: + err = 
dc.builder.Set(i, value) + } + return err +} + +func NewConcreteColumn(name string, colSpec columnMeta, size int, setFunc func(old, new interface{}) interface{}) *ConcreteColumn { + col := &ConcreteColumn{basicColumn: basicColumn{name: name, spec: colSpec, size: size, + interpolationFunction: GetInterpolateFunc(colSpec.interpolationType, colSpec.interpolationTolerance), + builder: frames.NewSliceColumnBuilder(name, frames.FloatType, size)}, setFunc: setFunc} + return col +} + +type ConcreteColumn struct { + basicColumn + setFunc func(old, new interface{}) interface{} +} + +func (c *ConcreteColumn) DType() frames.DType { + return c.framesCol.DType() +} +func (c *ConcreteColumn) FloatAt(i int) (float64, error) { + return c.framesCol.FloatAt(i) +} +func (c *ConcreteColumn) StringAt(i int) (string, error) { + return "", errors.New("aggregated column does not support string type") +} +func (c *ConcreteColumn) TimeAt(i int) (time.Time, error) { + return time.Unix(0, 0), errors.New("aggregated column does not support time type") +} +func (c *ConcreteColumn) SetDataAt(i int, val interface{}) error { + if !c.isValidIndex(i) { + return fmt.Errorf("index %d out of bounds [0:%d]", i, c.size) + } + value, _ := c.builder.At(i) + err := c.builder.Set(i, c.setFunc(value, val)) + return err +} + +func NewVirtualColumn(name string, colSpec columnMeta, size int, function func([]Column, int) (interface{}, error)) Column { + col := &virtualColumn{basicColumn: basicColumn{name: name, spec: colSpec, size: size, + interpolationFunction: GetInterpolateFunc(colSpec.interpolationType, colSpec.interpolationTolerance), + builder: frames.NewSliceColumnBuilder(name, frames.FloatType, size)}, + function: function} + return col +} + +type virtualColumn struct { + basicColumn + dependantColumns []Column + function func([]Column, int) (interface{}, error) +} + +func (c *virtualColumn) finish() error { + data := make([]float64, c.Len()) + var err error + for i := 0; i < c.Len(); i++ { + value, err := c.function(c.dependantColumns, i) + if err != nil { + return err + } + data[i] = value.(float64) + } + + c.framesCol, err = frames.NewSliceColumn(c.name, data) + if err != nil { + return err + } + return nil +} + +func (c *virtualColumn) DType() frames.DType { + return c.framesCol.DType() +} +func (c *virtualColumn) FloatAt(i int) (float64, error) { + return c.framesCol.FloatAt(i) +} +func (c *virtualColumn) StringAt(i int) (string, error) { + return c.framesCol.StringAt(i) +} +func (c *virtualColumn) TimeAt(i int) (time.Time, error) { + return time.Unix(0, 0), errors.New("aggregated column does not support time type") +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/interpolate.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/interpolate.go new file mode 100644 index 00000000..014213c6 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/interpolate.go @@ -0,0 +1,120 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. 
See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package pquerier + +import ( + "fmt" + "math" + "strings" +) + +type InterpolationType uint8 + +func (it InterpolationType) String() string { + switch it { + case interpolateNone: + return "none" + case interpolateNaN: + return "nan" + case interpolatePrev: + return "prev_val" + case interpolateNext: + return "next_val" + case interpolateLinear: + return "linear" + default: + return "unknown" + } +} + +const ( + interpolateNone InterpolationType = 0 + interpolateNaN InterpolationType = 1 + interpolatePrev InterpolationType = 2 + interpolateNext InterpolationType = 3 + interpolateLinear InterpolationType = 4 + defaultInterpolation InterpolationType = interpolateNext +) + +type InterpolationFunction func(tprev, tnext, tseek int64, vprev, vnext float64) (int64, float64) + +func StrToInterpolateType(str string) (InterpolationType, error) { + switch strings.ToLower(str) { + case "none", "": + return interpolateNone, nil + case "nan": + return interpolateNaN, nil + case "prev_val": + return interpolatePrev, nil + case "next_val": + return interpolateNext, nil + case "lin", "linear": + return interpolateLinear, nil + } + return 0, fmt.Errorf("unknown/unsupported interpulation function %s", str) +} + +// return line interpolation function, estimate seek value based on previous and next points +func GetInterpolateFunc(alg InterpolationType, tolerance int64) InterpolationFunction { + switch alg { + case interpolateNaN: + return func(tprev, tnext, tseek int64, vprev, vnext float64) (int64, float64) { + return tseek, math.NaN() + } + case interpolatePrev: + return func(tprev, tnext, tseek int64, vprev, vnext float64) (int64, float64) { + if absoluteDiff(tseek, tprev) > tolerance { + return 0, 0 + } + return tseek, vprev + } + case interpolateNext: + return func(tprev, tnext, tseek int64, vprev, vnext float64) (int64, float64) { + if absoluteDiff(tnext, tseek) > tolerance { + return 0, 0 + } + return tseek, vnext + } + case interpolateLinear: + return func(tprev, tnext, tseek int64, vprev, vnext float64) (int64, float64) { + if (absoluteDiff(tseek, tprev) > tolerance) || absoluteDiff(tnext, tseek) > tolerance { + return 0, 0 + } + if math.IsNaN(vprev) || math.IsNaN(vnext) { + return tseek, math.NaN() + } + v := vprev + (vnext-vprev)*float64(tseek-tprev)/float64(tnext-tprev) + return tseek, v + } + default: + // None interpolation + return func(tprev, tnext, tseek int64, vprev, vnext float64) (int64, float64) { + return tnext, vnext + } + } +} + +func absoluteDiff(a, b int64) int64 { + if a > b { + return a - b + } + return b - a +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/interpolate_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/interpolate_test.go new file mode 100644 index 00000000..8f082fe0 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/interpolate_test.go @@ -0,0 +1,86 @@ +// +build unit + +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. 
You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package pquerier + +import ( + "math" + "testing" + + "github.com/stretchr/testify/suite" +) + +type testInterpolationSuite struct { + suite.Suite +} + +func (suite *testInterpolationSuite) TestNone() { + fntype, err := StrToInterpolateType("") + suite.Require().Nil(err) + fn := GetInterpolateFunc(fntype, math.MaxInt64) + t, v := fn(10, 110, 60, 100, 200) + suite.Require().Equal(t, int64(110)) + suite.Require().Equal(v, 200.0) +} + +func (suite *testInterpolationSuite) TestNaN() { + fntype, err := StrToInterpolateType("nan") + suite.Require().Nil(err) + fn := GetInterpolateFunc(fntype, math.MaxInt64) + t, v := fn(10, 110, 60, 100, 200) + suite.Require().Equal(t, int64(60)) + suite.Require().Equal(math.IsNaN(v), true) +} + +func (suite *testInterpolationSuite) TestPrev() { + fntype, err := StrToInterpolateType("prev_val") + suite.Require().Nil(err) + fn := GetInterpolateFunc(fntype, math.MaxInt64) + t, v := fn(10, 110, 60, 100, 200) + suite.Require().Equal(t, int64(60)) + suite.Require().Equal(v, 100.0) +} + +func (suite *testInterpolationSuite) TestNext() { + fntype, err := StrToInterpolateType("next_val") + suite.Require().Nil(err) + fn := GetInterpolateFunc(fntype, math.MaxInt64) + t, v := fn(10, 110, 60, 100, 200) + suite.Require().Equal(t, int64(60)) + suite.Require().Equal(v, 200.0) +} + +func (suite *testInterpolationSuite) TestLin() { + fntype, err := StrToInterpolateType("lin") + suite.Require().Nil(err) + fn := GetInterpolateFunc(fntype, math.MaxInt64) + t, v := fn(10, 110, 60, 100, 200) + suite.Require().Equal(t, int64(60)) + suite.Require().Equal(v, 150.0) + t, v = fn(10, 110, 60, 100, math.NaN()) + suite.Require().Equal(t, int64(60)) + suite.Require().Equal(math.IsNaN(v), true) +} + +func TestInterpolationSuite(t *testing.T) { + suite.Run(t, new(testInterpolationSuite)) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/client_aggregates_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/client_aggregates_integration_test.go new file mode 100644 index 00000000..45b8724e --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/client_aggregates_integration_test.go @@ -0,0 +1,708 @@ +// +build integration + +package pqueriertest + +import ( + "math" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testClientAggregatesSuite struct { + basicQueryTestSuite +} + +func TestClientAggregatesSuite(t *testing.T) { + suite.Run(t, new(testClientAggregatesSuite)) +} + +func (suite *testClientAggregatesSuite) TestQueryAggregateWithNameWildcard() 
{ + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestData}, + tsdbtest.Metric{ + Name: "diskio", + Labels: labels1, + Data: ingestData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + expectedData := map[string][]tsdbtest.DataPoint{ + "sum": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 10}, {Time: suite.basicQueryTime, Value: 20}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 70}}, + "min": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 10}, {Time: suite.basicQueryTime, Value: 20}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 30}}, + "max": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 10}, {Time: suite.basicQueryTime, Value: 20}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 40}}} + expected := map[string]map[string][]tsdbtest.DataPoint{"cpu": expectedData, "diskio": expectedData} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Functions: "max,min,sum", Step: 2 * tsdbtest.MinuteInMillis, + From: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + metricName := set.At().Labels().Get(config.PrometheusMetricNameAttribute) + aggr := set.At().Labels().Get(aggregate.AggregateLabel) + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareMultipleMetrics(data, expected, metricName, aggr) + } + + assert.Equal(suite.T(), len(expectedData)*len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestQueryAggregateWithFilterOnMetricName() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestData}, + tsdbtest.Metric{ + Name: "diskio", + Labels: labels1, + Data: ingestData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + expectedData := map[string][]tsdbtest.DataPoint{"max": {{Time: suite.basicQueryTime, Value: 20}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 40}}} + expected := map[string]map[string][]tsdbtest.DataPoint{"cpu": expectedData} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Functions: "max", Step: 2 * tsdbtest.MinuteInMillis, + From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval), Filter: "_name=='cpu'"} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + metricName := set.At().Labels().Get(config.PrometheusMetricNameAttribute) + aggr := set.At().Labels().Get(aggregate.AggregateLabel) + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareMultipleMetrics(data, expected, metricName, aggr) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestClientAggregatesSinglePartition() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": {{Time: suite.basicQueryTime, Value: 30}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 70}}, + "min": {{Time: suite.basicQueryTime, Value: 10}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 30}}, + "max": {{Time: suite.basicQueryTime, Value: 20}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 40}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", Functions: "sum,max,min", Step: 2 * 60 * 1000, From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestClientAggregatesMultiPartition() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Labels: labels1, + Name: "cpu", + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 10}, {Time: suite.basicQueryTime, Value: 90}}, + "min": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 10}, {Time: suite.basicQueryTime, Value: 20}}, + "max": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 10}, {Time: suite.basicQueryTime, Value: 40}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum,max,min", + Step: 5 * tsdbtest.MinuteInMillis, + From: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, + To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestClientAggregatesMultiPartitionNonConcreteAggregates() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {suite.basicQueryTime - 7*tsdbtest.DaysInMillis + tsdbtest.MinuteInMillis, 12}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"avg": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 11}, {Time: suite.basicQueryTime, Value: 30}}, + "stdvar": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 2}, {Time: suite.basicQueryTime, Value: 100}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "avg,stdvar", + Step: 5 * tsdbtest.MinuteInMillis, + From: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, + To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestClientAggregatesMultiPartitionOneStep() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 25*tsdbtest.DaysInMillis, 10}, + {suite.basicQueryTime - 20*tsdbtest.DaysInMillis, 20}, + {suite.basicQueryTime - 12*tsdbtest.DaysInMillis, 30}, + {suite.basicQueryTime - 1*tsdbtest.DaysInMillis, 40}, + {suite.basicQueryTime + 20*tsdbtest.DaysInMillis, 50}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"count": {{Time: suite.basicQueryTime - 25*tsdbtest.DaysInMillis, Value: 5}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "count", + Step: 0, + From: suite.basicQueryTime - 25*tsdbtest.DaysInMillis, + To: suite.basicQueryTime + 21*tsdbtest.DaysInMillis} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestGetEmptyResponse() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", Functions: "sum,max,min,sqr", + Step: 1 * 60 * 60 * 1000, + From: suite.basicQueryTime - 10*tsdbtest.DaysInMillis, + To: suite.basicQueryTime - 8*tsdbtest.DaysInMillis} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + } + + assert.Equal(suite.T(), 0, seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestSelectAggregatesByRequestedColumns() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": {{Time: suite.basicQueryTime, Value: 30}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 70}}, + "min": {{Time: suite.basicQueryTime, Value: 10}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 30}}, + "max": {{Time: suite.basicQueryTime, Value: 20}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 40}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "cpu", Function: "max"}, {Metric: "cpu", Function: "min"}, {Metric: "cpu", Function: "sum"}}, + Step: 2 * 60 * 1000, From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestSelectAggregatesAndRawByRequestedColumns() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": {{Time: suite.basicQueryTime, Value: 30}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 70}}, + "": {{suite.basicQueryTime, 10}, {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "cpu", Function: "sum"}, {Metric: "cpu"}}, + Step: 2 * 60 * 1000, From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestQueryAllData() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": {{Time: suite.basicQueryTime, Value: 30}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 70}}, + "min": {{Time: suite.basicQueryTime, Value: 10}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 30}}, + "max": {{Time: suite.basicQueryTime, Value: 20}, {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 40}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum,max,min", + Step: 2 * 60 * 1000, + From: 0, + To: math.MaxInt64} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestAggregatesWithZeroStep() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{ + "max": {{Time: suite.basicQueryTime, Value: 40}}, + "min": {{Time: suite.basicQueryTime, Value: 10}}, + "sum": {{Time: suite.basicQueryTime, Value: 100}}, + "count": {{Time: suite.basicQueryTime, Value: 4}}, + } + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", Functions: "max, sum,count,min", Step: 0, From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + for i, dataPoint := range expected[agg] { + suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") + } + } + + assert.Equal(suite.T(), 4, seriesCount, "series count didn't match expected") +} + +func (suite *testClientAggregatesSuite) TestUsePreciseAggregationsConfig() { + suite.v3ioConfig.UsePreciseAggregations = true + defer func() { suite.v3ioConfig.UsePreciseAggregations = false }() + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.NoError(err, "failed to create v3io adapter.") + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": {{Time: suite.basicQueryTime, Value: 100}}, + "min": {{Time: suite.basicQueryTime, Value: 10}}, + "max": {{Time: suite.basicQueryTime, Value: 40}}} + + querierV2, err := adapter.QuerierV2() + suite.NoError(err, "failed to create querier v2.") + + params := &pquerier.SelectParams{Name: "cpu", Functions: "sum,max,min", Step: 1 * 60 * 60 * 1000, From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + suite.NoError(err, "failed to exeute query,") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + 
} + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + suite.Require().Equal(3, seriesCount, "series count didn't match expected") +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/cross_series_aggregation_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/cross_series_aggregation_integration_test.go new file mode 100644 index 00000000..624ec921 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/cross_series_aggregation_integration_test.go @@ -0,0 +1,770 @@ +// +build integration + +package pqueriertest + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testCrossSeriesAggregatesSuite struct { + basicQueryTestSuite +} + +func TestCrossSeriesAggregatesSuite(t *testing.T) { + suite.Run(t, new(testCrossSeriesAggregatesSuite)) +} + +func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesTimesFallsOnStep() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 30}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime, 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{ + "sum": {{Time: suite.basicQueryTime, Value: 30}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 50}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 70}}, + "min": {{Time: suite.basicQueryTime, Value: 10}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 20}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 30}}, + "avg": {{Time: suite.basicQueryTime, Value: 15}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 25}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 35}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum_all,min_all,avg_all", + Step: 2 * 60 * 1000, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis} + set, err := querierV2.Select(params) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + 
suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregates() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 30}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime, 20}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{ + "sum": {{Time: suite.basicQueryTime, Value: 30}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 50}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 70}}, + "min": {{Time: suite.basicQueryTime, Value: 10}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 20}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 30}}, + "avg": {{Time: suite.basicQueryTime, Value: 15}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 25}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 35}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum_all,min_all,avg_all", + Step: 2 * 60 * 1000, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis} + set, err := querierV2.Select(params) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesMultiPartition() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime, 20}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 60}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 20}, + {suite.basicQueryTime - 
7*tsdbtest.DaysInMillis + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime, 30}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{ + "max": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 20}, + {Time: suite.basicQueryTime - 4*tsdbtest.MinuteInMillis, Value: 30}, + {Time: suite.basicQueryTime - 2*tsdbtest.MinuteInMillis, Value: 30}, + {Time: suite.basicQueryTime, Value: 30}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 60}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "max_all", + Step: 2 * 60 * 1000, + From: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, + To: suite.basicQueryTime + 3*tsdbtest.MinuteInMillis} + set, err := querierV2.Select(params) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesWithInterpolation() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, 40}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime, 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{ + "sum": {{Time: suite.basicQueryTime, Value: 30}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 50}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 70}}, + "min": {{Time: suite.basicQueryTime, Value: 10}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 20}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 30}}, + "max": {{Time: suite.basicQueryTime, Value: 20}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 30}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 40}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create 
querier v2") + + selectParams, _, err := pquerier.ParseQuery("select sum_all(prev_val(cpu)), min_all(prev_val(cpu)), max_all(prev_val(cpu))") + suite.NoError(err) + selectParams.Step = 2 * tsdbtest.MinuteInMillis + selectParams.From = suite.basicQueryTime + selectParams.To = suite.basicQueryTime + 5*tsdbtest.MinuteInMillis + set, err := querierV2.Select(selectParams) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesMultiPartitionExactlyOnStep() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime, 20}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 60}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 20}, + {suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime, 30}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{ + "sum": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 30}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 2*tsdbtest.MinuteInMillis, Value: 2}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 4*tsdbtest.MinuteInMillis, Value: 2}, + {Time: suite.basicQueryTime, Value: 50}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 100}}, + "min": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 10}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 2*tsdbtest.MinuteInMillis, Value: 1}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 4*tsdbtest.MinuteInMillis, Value: 1}, + {Time: suite.basicQueryTime, Value: 20}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 40}}, + "avg": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 15}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 2*tsdbtest.MinuteInMillis, Value: 1}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 4*tsdbtest.MinuteInMillis, Value: 1}, + {Time: suite.basicQueryTime, Value: 25}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 50}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + selectParams, _, err := pquerier.ParseQuery("select sum_all(prev_val(cpu)), 
min_all(prev_val(cpu)),avg_all(prev_val(cpu))") + suite.NoError(err) + selectParams.Step = 2 * tsdbtest.MinuteInMillis + selectParams.From = suite.basicQueryTime - 7*tsdbtest.DaysInMillis + selectParams.To = suite.basicQueryTime + 3*tsdbtest.MinuteInMillis + set, err := querierV2.Select(selectParams) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesMultiPartitionWithInterpolation() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 3*tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime, 20}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 60}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 20}, + {suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 2*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime, 30}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{ + "sum": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 30}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 2*tsdbtest.MinuteInMillis, Value: 2}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 4*tsdbtest.MinuteInMillis, Value: 21}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 6*tsdbtest.MinuteInMillis, Value: 21}, + {Time: suite.basicQueryTime, Value: 50}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 61}}, + "count": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 2}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 2*tsdbtest.MinuteInMillis, Value: 2}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 4*tsdbtest.MinuteInMillis, Value: 2}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 6*tsdbtest.MinuteInMillis, Value: 2}, + {Time: suite.basicQueryTime, Value: 2}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 2}}, + "min": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 10}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 2*tsdbtest.MinuteInMillis, Value: 1}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 4*tsdbtest.MinuteInMillis, Value: 1}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 6*tsdbtest.MinuteInMillis, Value: 1}, + 
{Time: suite.basicQueryTime, Value: 20}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 1}}, + "avg": {{Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, Value: 15}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 2*tsdbtest.MinuteInMillis, Value: 1}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 4*tsdbtest.MinuteInMillis, Value: 10.5}, + {Time: suite.basicQueryTime - 7*tsdbtest.DaysInMillis + 6*tsdbtest.MinuteInMillis, Value: 10.5}, + {Time: suite.basicQueryTime, Value: 25}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 30.5}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + selectParams, _, err := pquerier.ParseQuery("select sum_all(prev_val(cpu)), min_all(prev_val(cpu)),avg_all(prev_val(cpu)),count_all(prev_val(cpu))") + suite.NoError(err) + selectParams.Step = 2 * tsdbtest.MinuteInMillis + selectParams.From = suite.basicQueryTime - 7*tsdbtest.DaysInMillis + selectParams.To = suite.basicQueryTime + 3*tsdbtest.MinuteInMillis + set, err := querierV2.Select(selectParams) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesWithInterpolationOverTolerance() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 10*tsdbtest.MinuteInMillis, 30}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime, 20}, + {suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 10*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{ + "sum": {{Time: suite.basicQueryTime, Value: 30}, + {Time: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, Value: 30}, + {Time: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis, Value: 70}}, + "min": {{Time: suite.basicQueryTime, Value: 10}, + {Time: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, Value: 30}, + {Time: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis, Value: 30}}, + "max": {{Time: suite.basicQueryTime, Value: 20}, + {Time: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, Value: 30}, + {Time: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis, Value: 40}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + selectParams, _, err := pquerier.ParseQuery("select sum_all(prev_val(cpu)), min_all(prev_val(cpu)), max_all(prev_val(cpu))") + suite.NoError(err) + selectParams.Step = 5 * 
tsdbtest.MinuteInMillis + selectParams.From = suite.basicQueryTime + selectParams.To = suite.basicQueryTime + 10*tsdbtest.MinuteInMillis + for i := 0; i < len(selectParams.RequestedColumns); i++ { + selectParams.RequestedColumns[i].InterpolationTolerance = tsdbtest.MinuteInMillis + } + set, err := querierV2.Select(selectParams) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesSinglePartition() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime, 20}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": {{Time: suite.basicQueryTime, Value: 30}}, + "min": {{Time: suite.basicQueryTime, Value: 10}}, + "max": {{Time: suite.basicQueryTime, Value: 20}}, + "count": {{Time: suite.basicQueryTime, Value: 2}}, + "avg": {{Time: suite.basicQueryTime, Value: 15}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum_all,min_all,max_all,count_all,avg_all", + Step: 2 * 60 * 1000, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 1*tsdbtest.MinuteInMillis} + set, err := querierV2.Select(params) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testCrossSeriesAggregatesSuite) TestOnlyVirtualCrossSeriesAggregateWithInterpolation() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, 20}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime, 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, 20}} + testParams := tsdbtest.NewTestParams(suite.T(), + 
tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{ + "avg": {{Time: suite.basicQueryTime, Value: 15}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 1}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 10.5}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + selectParams, _, err := pquerier.ParseQuery("select avg_all(prev_val(cpu))") + suite.NoError(err) + selectParams.Step = 2 * tsdbtest.MinuteInMillis + selectParams.From = suite.basicQueryTime + selectParams.To = suite.basicQueryTime + 5*tsdbtest.MinuteInMillis + set, err := querierV2.Select(selectParams) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesSameLabelMultipleMetrics() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 30}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime, 20}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "disk", + Labels: labels1, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{ + "sum-cpu": {{Time: suite.basicQueryTime, Value: 10}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 20}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 30}}, + "sum-disk": {{Time: suite.basicQueryTime, Value: 20}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 30}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 40}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + params := &pquerier.SelectParams{Name: "cpu, disk", + Functions: "sum_all", + Step: 2 * 60 * 1000, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis} + set, err := querierV2.Select(params) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + 
data, err := tsdbtest.IteratorToSlice(iter) + suite.NoError(err) + + agg := set.At().Labels().Get(aggregate.AggregateLabel) + suite.NoError(err) + + metricName := set.At().Labels().Get(config.PrometheusMetricNameAttribute) + suite.NoError(err) + + suite.compareSingleMetricWithAggregator(data, expected, fmt.Sprintf("%v-%v", agg, metricName)) + } + + suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesDifferentLabelMultipleMetrics() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "darwin") + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 30}} + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime, 20}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 1}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData2}, + tsdbtest.Metric{ + Name: "disk", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "disk", + Labels: labels2, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := []tsdbtest.DataPoint{ + {Time: suite.basicQueryTime, Value: 30}, + {Time: suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, Value: 50}, + {Time: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, Value: 70}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + params := &pquerier.SelectParams{Name: "cpu, disk", + Functions: "sum_all", + Step: 2 * 60 * 1000, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis} + set, err := querierV2.Select(params) + suite.Require().NoError(err, "Failed to execute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + suite.NoError(err) + + suite.compareSingleMetric(data, expected) + } + + suite.Require().Equal(2, seriesCount, "series count didn't match expected") +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/dataframe_query_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/dataframe_query_integration_test.go new file mode 100644 index 00000000..19e81d08 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/dataframe_query_integration_test.go @@ -0,0 +1,1321 @@ +// +build integration + +package pqueriertest + +import ( + "errors" + "fmt" + "math" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "github.com/v3io/frames" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + 
"github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testSelectDataframeSuite struct { + basicQueryTestSuite +} + +func TestSelectDataframeSuite(t *testing.T) { + suite.Run(t, new(testSelectDataframeSuite)) +} + +func (suite *testSelectDataframeSuite) TestAggregatesWithZeroStepSelectDataframe() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string]tsdbtest.DataPoint{"max": {Time: suite.basicQueryTime, Value: 40}, + "min": {Time: suite.basicQueryTime, Value: 10}, + "sum": {Time: suite.basicQueryTime, Value: 100}, + "count": {Time: suite.basicQueryTime, Value: 4}, + } + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", Functions: "max, sum,count,min", Step: 0, From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.SelectDataFrame(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.NextFrame() { + seriesCount++ + frame, err := set.GetFrame() + suite.NoError(err) + + indexCol := frame.Indices()[0] + assert.Equal(suite.T(), 1, indexCol.Len()) + t, err := indexCol.TimeAt(0) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), suite.basicQueryTime, t.UnixNano()/int64(time.Millisecond)) + + for _, colName := range frame.Names() { + col, err := frame.Column(colName) + suite.NoError(err) + suite.Require().Equal(1, col.Len()) + currentColAggregate := strings.Split(col.Name(), "(")[0] + f, err := col.FloatAt(0) + assert.NoError(suite.T(), err) + + var expectedFloat float64 + switch val := expected[currentColAggregate].Value.(type) { + case int: + expectedFloat = float64(val) + case float64: + expectedFloat = val + default: + suite.Failf("invalid data type", "expected int or float, actual type is %t", val) + } + suite.Require().Equal(expectedFloat, f) + } + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testSelectDataframeSuite) TestEmptyRawDataSelectDataframe() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", From: suite.basicQueryTime - 10*tsdbtest.MinuteInMillis, To: suite.basicQueryTime - 1*tsdbtest.MinuteInMillis} + set, err := querierV2.SelectDataFrame(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.NextFrame() { + seriesCount++ + frame, err := set.GetFrame() + suite.NoError(err) + + suite.Require().Equal(0, frame.Indices()[0].Len()) + + for _, colName := range frame.Names() { + col, _ := frame.Column(colName) + assert.Equal(suite.T(), 0, col.Len()) + } + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testSelectDataframeSuite) Test2Series1EmptySelectDataframe() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "diskio", + Labels: labels1, + Data: []tsdbtest.DataPoint{{suite.basicQueryTime + 10*tsdbtest.MinuteInMillis, 10}}}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"cpu": ingestedData, + "diskio": {{suite.basicQueryTime, math.NaN()}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), math.NaN()}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, math.NaN()}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, math.NaN()}}, + } + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params, _, _ := pquerier.ParseQuery("select cpu,diskio") + params.From = suite.basicQueryTime + params.To = suite.basicQueryTime + 4*tsdbtest.MinuteInMillis + + set, err := querierV2.SelectDataFrame(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.NextFrame() { + seriesCount++ + frame, err := set.GetFrame() + suite.NoError(err) + + indexCol := frame.Indices()[0] + assert.Equal(suite.T(), len(ingestedData), indexCol.Len()) + for i := 0; i < indexCol.Len(); i++ { + t, err := indexCol.TimeAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), ingestedData[i].Time, t.UnixNano()/int64(time.Millisecond)) + } + + for _, colName := range frame.Names() { + col, err := 
frame.Column(colName) + suite.NoError(err) + assert.Equal(suite.T(), len(ingestedData), col.Len()) + for i := 0; i < col.Len(); i++ { + currentExpected := expected[col.Name()][i].Value + switch val := currentExpected.(type) { + case float64: + fv, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + if !(math.IsNaN(val) && math.IsNaN(fv)) { + assert.Equal(suite.T(), currentExpected, fv) + } + case int: + iv, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), float64(val), iv) + case string: + sv, err := col.StringAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), val, sv) + default: + assert.Error(suite.T(), errors.New("unsupported data type")) + } + } + } + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testSelectDataframeSuite) TestStringAndFloatMetricsDataframe() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.NoError(err, "failed to create v3io adapter") + + metricName1 := "cpu" + metricName2 := "log" + labels := utils.LabelsFromStringList("os", "linux") + labelsWithName := append(labels, utils.LabelsFromStringList("__name__", metricName2)...) + + expectedTimeColumn := []int64{suite.basicQueryTime, suite.basicQueryTime + tsdbtest.MinuteInMillis, suite.basicQueryTime + 2*tsdbtest.MinuteInMillis} + logData := []interface{}{"a", "b", "c"} + expectedColumns := map[string][]interface{}{metricName1: {10.0, 20.0, 30.0}, + metricName2: logData} + appender, err := adapter.Appender() + suite.NoError(err, "failed to create v3io appender") + + ref, err := appender.Add(labelsWithName, expectedTimeColumn[0], logData[0]) + suite.NoError(err, "failed to add data to the TSDB appender") + for i := 1; i < len(expectedTimeColumn); i++ { + appender.AddFast(labels, ref, expectedTimeColumn[i], logData[i]) + } + + _, err = appender.WaitForCompletion(0) + suite.NoError(err, "failed to wait for TSDB append completion") + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: metricName1, + Labels: labels, + Data: []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}}}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + suite.NoError(err, "failed to create querier") + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: metricName1}, {Metric: metricName2}}, + From: suite.basicQueryTime, To: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis} + iter, err := querierV2.SelectDataFrame(params) + suite.NoError(err, "failed to execute query") + + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + suite.NoError(err) + indexCol := frame.Indices()[0] + + for i := 0; i < indexCol.Len(); i++ { + t, _ := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + suite.Require().Equal(expectedTimeColumn[i], timeMillis, "time column does not match at index %v", i) + for _, columnName := range frame.Names() { + var v interface{} + + column, err := frame.Column(columnName) + suite.NoError(err) + if column.DType() == frames.FloatType { + v, _ = column.FloatAt(i) + } else if column.DType() == frames.StringType { + v, _ = column.StringAt(i) + } else { + suite.Fail(fmt.Sprintf("column type is not as expected: %v", column.DType())) + } + + 
suite.Require().Equal(expectedColumns[column.Name()][i], v, "column %v does not match at index %v", column.Name(), i) + } + } + } +} + +func (suite *testSelectDataframeSuite) TestQueryDataFrameMultipleMetricsWithMultipleLabelSets() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + metricName1 := "cpu" + metricName2 := "diskio" + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + numberOfEvents := 5 + eventsInterval := int64(tsdbtest.MinuteInMillis) + ingestData1 := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + ingestData2 := []tsdbtest.DataPoint{{suite.basicQueryTime + tsdbtest.MinuteInMillis, 20}} + ingestData3 := []tsdbtest.DataPoint{{suite.basicQueryTime, 30}, + {suite.basicQueryTime + tsdbtest.MinuteInMillis, 40}} + + expectedData := map[string][]tsdbtest.DataPoint{ + fmt.Sprintf("%v-%v", metricName1, "linux"): {{suite.basicQueryTime, 10}, {suite.basicQueryTime + tsdbtest.MinuteInMillis, math.NaN()}}, + fmt.Sprintf("%v-%v", metricName2, "linux"): {{suite.basicQueryTime, math.NaN()}, {suite.basicQueryTime + tsdbtest.MinuteInMillis, 20}}, + fmt.Sprintf("%v-%v", metricName2, "mac"): ingestData3} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: metricName1, + Labels: labels1, + Data: ingestData1}, + tsdbtest.Metric{ + Name: metricName2, + Labels: labels1, + Data: ingestData2}, + tsdbtest.Metric{ + Name: metricName2, + Labels: labels2, + Data: ingestData3}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Filter: "1==1", + From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents)*eventsInterval} + set, err := querierV2.SelectDataFrame(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.NextFrame() { + seriesCount++ + frame, err := set.GetFrame() + suite.NoError(err) + + indexCol := frame.Indices()[0] + assert.Equal(suite.T(), 2, indexCol.Len()) + for i := 0; i < indexCol.Len(); i++ { + t, err := indexCol.TimeAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), expectedData[fmt.Sprintf("%v-%v", metricName1, "linux")][i].Time, t.UnixNano()/int64(time.Millisecond)) + + for _, colName := range frame.Names() { + col, err := frame.Column(colName) + suite.NoError(err) + currentExpectedData := expectedData[fmt.Sprintf("%v-%v", col.Name(), frame.Labels()["os"])] + assert.Equal(suite.T(), len(currentExpectedData), col.Len()) + currentExpected := currentExpectedData[i].Value + + switch val := currentExpected.(type) { + case float64: + f, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + if !(math.IsNaN(val) && math.IsNaN(f)) { + assert.Equal(suite.T(), currentExpected, f) + } + case int: + iv, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), float64(val), iv) + case string: + s, err := col.StringAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), val, s) + default: + assert.Error(suite.T(), errors.New("unsupported data type")) + } + } + } + } + + assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") +} + +func (suite *testSelectDataframeSuite) 
TestSelectDataframeAggregationsMetricsHaveBigGaps() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + ingestedData1 := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime - 4*tsdbtest.DaysInMillis), 20}} + + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime - 1*tsdbtest.DaysInMillis, 30}} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu1", + Labels: labels1, + Data: ingestedData1}, + tsdbtest.Metric{ + Name: "cpu2", + Labels: labels1, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expectedTime := []int64{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, suite.basicQueryTime - 4*tsdbtest.DaysInMillis, suite.basicQueryTime - 1*tsdbtest.DaysInMillis} + expected := map[string][]float64{"count(cpu1)": {1, 1, math.NaN()}, + "count(cpu2)": {math.NaN(), math.NaN(), 1}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{ + Functions: "count", + Step: int64(tsdbtest.MinuteInMillis), + From: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, + To: suite.basicQueryTime} + set, err := querierV2.SelectDataFrame(params) + if err != nil { + suite.T().Fatalf("Failed to execute query, err: %v", err) + } + + var dataFrameCount int + for set.NextFrame() { + dataFrameCount++ + frame, err := set.GetFrame() + suite.Require().NoError(err) + suite.Require().Equal(len(expected), len(frame.Names()), "number of columns in frame does not match") + suite.Require().Equal(len(expectedTime), frame.Indices()[0].Len(), "columns size is not as expected") + + indexCol := frame.Indices()[0] + + for i := 0; i < len(expected); i++ { + t, err := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + suite.Require().NoError(err) + suite.Require().Equal(expectedTime[i], timeMillis) + + for _, currName := range frame.Names() { + currCol, err := frame.Column(currName) + suite.Require().NoError(err) + currVal, err := currCol.FloatAt(i) + + suite.Require().NoError(err) + if !(math.IsNaN(currVal) && math.IsNaN(expected[currName][i])) { + suite.Require().Equal(expected[currName][i], currVal) + } + } + } + } + + suite.Require().Equal(1, dataFrameCount, "series count didn't match expected") +} + +func (suite *testSelectDataframeSuite) TestSelectDataframeDownsampleMetricsHaveBigGaps() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + ingestedData1 := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime - 4*tsdbtest.DaysInMillis), 20}} + + ingestedData2 := []tsdbtest.DataPoint{{suite.basicQueryTime - 1*tsdbtest.DaysInMillis, 30}} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu1", + Labels: labels1, + Data: ingestedData1}, + tsdbtest.Metric{ + Name: "cpu2", + Labels: labels1, + Data: ingestedData2}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expectedTime := []int64{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, + suite.basicQueryTime - 4*tsdbtest.DaysInMillis - 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime - 4*tsdbtest.DaysInMillis - 1*tsdbtest.MinuteInMillis, + suite.basicQueryTime - 4*tsdbtest.DaysInMillis, + suite.basicQueryTime - 1*tsdbtest.DaysInMillis - 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime - 1*tsdbtest.DaysInMillis - 1*tsdbtest.MinuteInMillis, + suite.basicQueryTime - 1*tsdbtest.DaysInMillis} + expected := map[string][]float64{"cpu1": {10, 20, 20, 20, math.NaN(), math.NaN(), math.NaN()}, + "cpu2": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), 30, 30, 30}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{ + Step: int64(tsdbtest.MinuteInMillis), + From: suite.basicQueryTime - 7*tsdbtest.DaysInMillis, + To: suite.basicQueryTime} + set, err := querierV2.SelectDataFrame(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var dataFrameCount int + for set.NextFrame() { + dataFrameCount++ + frame, err := set.GetFrame() + suite.Require().NoError(err) + suite.Require().Equal(len(expected), len(frame.Names()), "number of columns in frame does not match") + suite.Require().Equal(len(expectedTime), frame.Indices()[0].Len(), "columns size is not as expected") + + indexCol := frame.Indices()[0] + + for i := 0; i < len(expected); i++ { + t, err := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + suite.Require().NoError(err) + suite.Require().Equal(expectedTime[i], timeMillis) + + for _, currName := range frame.Names() { + currCol, err := frame.Column(currName) + suite.Require().NoError(err) + currVal, err := currCol.FloatAt(i) + + suite.Require().NoError(err) + if !(math.IsNaN(currVal) && math.IsNaN(expected[currName][i])) { + suite.Require().Equal(expected[currName][i], currVal) + } + } + } + } + + suite.Require().Equal(1, dataFrameCount, "series count didn't match expected") +} + +func (suite *testSelectDataframeSuite) TestQueryDataFrameMultipleMetrics() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.NoError(err, "failed to create v3io adapter") + + metricName1 := "cpu" + metricName2 := "diskio" + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 5 + eventsInterval := int64(tsdbtest.MinuteInMillis) + ingestData1 := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 15}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 18}} + ingestData2 := []tsdbtest.DataPoint{{suite.basicQueryTime + tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 22}, + {suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, 26}} + + expectedData := 
map[string][]tsdbtest.DataPoint{ + metricName1: {{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, math.NaN()}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 15}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 18}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, math.NaN()}, + {suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, math.NaN()}}, + metricName2: {{suite.basicQueryTime, math.NaN()}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, math.NaN()}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, math.NaN()}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 22}, + {suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, 26}}} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: metricName1, + Labels: labels1, + Data: ingestData1}, + tsdbtest.Metric{ + Name: metricName2, + Labels: labels1, + Data: ingestData2}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + suite.NoError(err, "failed to create querier v2") + + params := &pquerier.SelectParams{Filter: "1==1", + From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents)*eventsInterval} + set, err := querierV2.SelectDataFrame(params) + suite.NoError(err, "failed to exeute query") + + var seriesCount int + for set.NextFrame() { + seriesCount++ + frame, err := set.GetFrame() + suite.NoError(err) + + indexCol := frame.Indices()[0] + assert.Equal(suite.T(), 6, indexCol.Len()) + for i := 0; i < indexCol.Len(); i++ { + t, err := indexCol.TimeAt(i) + assert.NoError(suite.T(), err) + suite.Require().Equal(expectedData[metricName1][i].Time, t.UnixNano()/int64(time.Millisecond)) + + for _, colName := range frame.Names() { + col, err := frame.Column(colName) + suite.NoError(err) + currentExpectedData := expectedData[col.Name()] + suite.Require().Equal(len(currentExpectedData), col.Len()) + currentExpected := currentExpectedData[i].Value + + switch val := currentExpected.(type) { + case float64: + f, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + if !(math.IsNaN(val) && math.IsNaN(f)) { + assert.Equal(suite.T(), currentExpected, f) + } + case int: + iv, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), float64(val), iv) + case string: + s, err := col.StringAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), val, s) + default: + assert.Error(suite.T(), errors.New("unsupported data type")) + } + } + } + } + + suite.Require().Equal(1, seriesCount, "series count didn't match expected") +} + +func (suite *testSelectDataframeSuite) TestColumnOrder() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.NoError(err, "failed to create v3io adapter") + + metricName1 := "cpu" + metricName2 := "diskio" + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 5 + eventsInterval := int64(tsdbtest.MinuteInMillis) + ingestData1 := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 15}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 18}} + ingestData2 := []tsdbtest.DataPoint{{suite.basicQueryTime + tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 22}, + {suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, 26}} + + expectedData := map[string][]tsdbtest.DataPoint{ + metricName1: {{suite.basicQueryTime, 10}, 
+ {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, math.NaN()}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 15}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 18}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, math.NaN()}, + {suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, math.NaN()}}, + metricName2: {{suite.basicQueryTime, math.NaN()}, + {suite.basicQueryTime + 1*tsdbtest.MinuteInMillis, 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, math.NaN()}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, math.NaN()}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 22}, + {suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, 26}}} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: metricName1, + Labels: labels1, + Data: ingestData1}, + tsdbtest.Metric{ + Name: metricName2, + Labels: labels1, + Data: ingestData2}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + suite.NoError(err, "failed to create querier v2") + + columnOrder := "diskio,cpu" + params := &pquerier.SelectParams{Name: columnOrder, + From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents)*eventsInterval} + set, err := querierV2.SelectDataFrame(params) + suite.NoError(err, "failed to exeute query") + + var seriesCount int + for set.NextFrame() { + seriesCount++ + frame, err := set.GetFrame() + suite.NoError(err) + + indexCol := frame.Indices()[0] + assert.Equal(suite.T(), 6, indexCol.Len()) + suite.Require().Equal(columnOrder, strings.Join(frame.Names(), ",")) + for i := 0; i < indexCol.Len(); i++ { + t, err := indexCol.TimeAt(i) + assert.NoError(suite.T(), err) + suite.Require().Equal(expectedData[metricName1][i].Time, t.UnixNano()/int64(time.Millisecond)) + + for _, colName := range frame.Names() { + col, err := frame.Column(colName) + suite.NoError(err) + currentExpectedData := expectedData[col.Name()] + suite.Require().Equal(len(currentExpectedData), col.Len()) + currentExpected := currentExpectedData[i].Value + switch val := currentExpected.(type) { + case float64: + fv, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + if !(math.IsNaN(val) && math.IsNaN(fv)) { + assert.Equal(suite.T(), currentExpected, fv) + } + case int: + iv, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), float64(val), iv) + case string: + sv, err := col.StringAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), val, sv) + default: + assert.Error(suite.T(), errors.New("unsupported data type")) + } + } + } + } + + suite.Require().Equal(1, seriesCount, "series count didn't match expected") +} + +func (suite *testSelectDataframeSuite) TestQueryNonExistingMetric() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels := utils.LabelsFromStringList("os", "linux") + cpuData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels, + Data: cpuData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu, tal", + From: suite.basicQueryTime, To: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis} + iter, err := querierV2.SelectDataFrame(params) + suite.Require().NoError(err) + + expectedData := map[string][]tsdbtest.DataPoint{ + "cpu": {{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}}, + "tal": {{suite.basicQueryTime, math.NaN()}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), math.NaN()}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, math.NaN()}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, math.NaN()}}} + + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + suite.NoError(err) + + indexCol := frame.Indices()[0] + assert.Equal(suite.T(), 4, indexCol.Len()) + for i := 0; i < indexCol.Len(); i++ { + t, err := indexCol.TimeAt(i) + assert.NoError(suite.T(), err) + suite.Require().Equal(expectedData["cpu"][i].Time, t.UnixNano()/int64(time.Millisecond)) + + for _, colName := range frame.Names() { + col, err := frame.Column(colName) + suite.NoError(err) + currentExpectedData := expectedData[col.Name()] + suite.Require().Equal(len(currentExpectedData), col.Len()) + currentExpected := currentExpectedData[i].Value + + switch val := currentExpected.(type) { + case float64: + f, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + if !(math.IsNaN(val) && math.IsNaN(f)) { + assert.Equal(suite.T(), currentExpected, f) + } + case int: + iv, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), float64(val), iv) + case string: + s, err := col.StringAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), val, s) + default: + assert.Error(suite.T(), errors.New("unsupported data type")) + } + } + } + } +} + +func (suite *testSelectDataframeSuite) TestSparseStringAndNumericColumnsDataframe() { + requireCtx := suite.Require() + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + requireCtx.NoError(err, "failed to create v3io adapter") + + metricCpu := "cpu" + metricLog := "log" + labels := utils.LabelsFromStringList("os", "linux") + labelsWithNameLog := append(labels, utils.LabelsFromStringList("__name__", metricLog)...) 
+ + expectedTimeColumn := []int64{ + suite.basicQueryTime, + suite.basicQueryTime + tsdbtest.MinuteInMillis, + suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 4*tsdbtest.MinuteInMillis} + + timeColumnLog := []int64{ + suite.basicQueryTime, + suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 4*tsdbtest.MinuteInMillis} + + dataLog := []interface{}{"a", "c", "d", "e"} + expectedColumns := map[string][]interface{}{ + metricCpu: {10.0, 20.0, 30.0, math.NaN(), 50.0}, + metricLog: {"a", "", "c", "d", "e"}} + appender, err := adapter.Appender() + requireCtx.NoError(err, "failed to create v3io appender") + + refLog, err := appender.Add(labelsWithNameLog, timeColumnLog[0], dataLog[0]) + suite.NoError(err, "failed to add data to the TSDB appender") + for i := 1; i < len(timeColumnLog); i++ { + appender.AddFast(labels, refLog, timeColumnLog[i], dataLog[i]) + } + + _, err = appender.WaitForCompletion(0) + requireCtx.NoError(err, "failed to wait for TSDB append completion") + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: metricCpu, + Labels: labels, + Data: []tsdbtest.DataPoint{ + {suite.basicQueryTime, 10.0}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20.0}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30.0}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 50.0}}}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + requireCtx.NoError(err, "failed to create querier") + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: metricCpu}, {Metric: metricLog}}, + From: suite.basicQueryTime, To: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis} + iter, err := querierV2.SelectDataFrame(params) + requireCtx.NoError(err, "failed to execute query") + + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + requireCtx.NoError(err) + indexCol := frame.Indices()[0] + + nullValuesMap := frame.NullValuesMap() + requireCtx.NotNil(nullValuesMap, "null value map should not be empty") + + for i := 0; i < indexCol.Len(); i++ { + t, _ := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + requireCtx.Equal(expectedTimeColumn[i], timeMillis, "time column does not match at index %v", i) + for _, columnName := range frame.Names() { + var v interface{} + column, err := frame.Column(columnName) + requireCtx.NoError(err) + if column.DType() == frames.FloatType { + v, _ = column.FloatAt(i) + if v == math.NaN() { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + bothNaN := math.IsNaN(expectedColumns[column.Name()][i].(float64)) && math.IsNaN(v.(float64)) + if bothNaN { + continue + } + } else if column.DType() == frames.StringType { + v, _ = column.StringAt(i) + if v == "" { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + } else { + suite.Fail(fmt.Sprintf("column type is not as expected: %v", column.DType())) + } + requireCtx.Equal(expectedColumns[column.Name()][i], v, "column %v does not match at index %v", column.Name(), i) + } + } + } +} + +func (suite *testSelectDataframeSuite) TestSparseNumericColumnsWithEmptyColumnsDataframe() { + requireCtx := suite.Require() + labelSetLinux := utils.LabelsFromStringList("os", "linux") + labelSetWindows := 
utils.LabelsFromStringList("os", "windows") + expectedTimeColumn := []int64{ + suite.basicQueryTime, + suite.basicQueryTime + tsdbtest.MinuteInMillis, + suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 4*tsdbtest.MinuteInMillis} + expectedColumns := map[string][]interface{}{ + "cpu_0-linux": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + "cpu_0-windows": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + "cpu_1-linux": {10.0, 20.0, 30.0, math.NaN(), 50.0}, + "cpu_1-windows": {math.NaN(), 22.0, 33.0, math.NaN(), 55.0}, + "cpu_2-linux": {math.NaN(), math.NaN(), math.NaN(), 40.4, 50.5}, + "cpu_2-windows": {10.0, 20.0, math.NaN(), 40.0, 50.0}, + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{ + tsdbtest.Metric{ + Name: "cpu_0", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + {expectedTimeColumn[0] - 68*tsdbtest.HoursInMillis, 10.0}, + {expectedTimeColumn[1] - 69*tsdbtest.HoursInMillis, 20.0}, + {expectedTimeColumn[2] - 70*tsdbtest.HoursInMillis, 30.0}, + {expectedTimeColumn[3] - 71*tsdbtest.HoursInMillis, 40.0}, + {expectedTimeColumn[4] - 72*tsdbtest.HoursInMillis, 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_1", + Labels: labelSetLinux, + Data: []tsdbtest.DataPoint{ + {expectedTimeColumn[0], 10.0}, + {expectedTimeColumn[1], 20.0}, + {expectedTimeColumn[2], 30.0}, + {expectedTimeColumn[4], 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_2", + Labels: labelSetLinux, + Data: []tsdbtest.DataPoint{ + // NA + // NA + {expectedTimeColumn[3], 40.4}, + {expectedTimeColumn[4], 50.5}}}, + tsdbtest.Metric{ + Name: "cpu_2", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + {expectedTimeColumn[0], 10.0}, + {expectedTimeColumn[1], 20.0}, + // NA + {expectedTimeColumn[3], 40.0}, + {expectedTimeColumn[4], 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_1", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + // NA + {expectedTimeColumn[1], 22.0}, + {expectedTimeColumn[2], 33.0}, + // NA + {expectedTimeColumn[4], 55.0}}}, + }}) + + adapter := tsdbtest.InsertData(suite.T(), testParams) + querierV2, err := adapter.QuerierV2() + requireCtx.NoError(err, "failed to create querier") + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "cpu_0"}, {Metric: "cpu_1"}, {Metric: "cpu_2"}}, + From: suite.basicQueryTime, To: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis} + iter, err := querierV2.SelectDataFrame(params) + requireCtx.NoError(err, "failed to execute query") + + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + requireCtx.NoError(err) + indexCol := frame.Indices()[0] + osLabel := frame.Labels()["os"] + + nullValuesMap := frame.NullValuesMap() + requireCtx.NotNil(nullValuesMap, "null value map should not be empty") + + for i := 0; i < indexCol.Len(); i++ { + t, _ := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + requireCtx.Equal(expectedTimeColumn[i], timeMillis, "time column does not match at index %v", i) + for _, columnName := range frame.Names() { + var v interface{} + key := fmt.Sprintf("%v-%v", columnName, osLabel) + column, err := frame.Column(columnName) + requireCtx.NoError(err) + if column.DType() == frames.FloatType { + v, _ = column.FloatAt(i) + if v == math.NaN() { + 
requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + bothNaN := math.IsNaN(expectedColumns[key][i].(float64)) && math.IsNaN(v.(float64)) + if bothNaN { + continue + } + } else if column.DType() == frames.StringType { + v, _ = column.StringAt(i) + if v == "" { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + } else { + suite.Fail(fmt.Sprintf("column type is not as expected: %v", column.DType())) + } + + expectedValue := expectedColumns[key][i] + if !math.IsNaN(expectedValue.(float64)) || !math.IsNaN(v.(float64)) { + requireCtx.Equal(expectedValue, v, "column %v does not match at index %v", columnName, i) + } + } + } + } +} + +func (suite *testSelectDataframeSuite) TestSparseNumericColumnsWithPartialLabelsDataframe() { + requireCtx := suite.Require() + labelSetLinux := utils.LabelsFromStringList("os", "linux") + labelSetWindows := utils.LabelsFromStringList("os", "windows") + expectedTimeColumn := []int64{ + suite.basicQueryTime, + suite.basicQueryTime + tsdbtest.MinuteInMillis, + suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 4*tsdbtest.MinuteInMillis} + expectedColumns := map[string][]interface{}{ + "cpu_0-linux": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + "cpu_0-windows": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + "cpu_1-linux": {10.0, 20.0, 30.0, 40.0, 50.0}, + "cpu_1-windows": {math.NaN(), 22.0, 33.0, math.NaN(), 55.0}, + "cpu_2-linux": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + "cpu_2-windows": {10.0, 20.0, math.NaN(), 40.0, 50.0}, + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{ + tsdbtest.Metric{ + Name: "cpu_0", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + {expectedTimeColumn[0] - 68*tsdbtest.HoursInMillis, 10.0}, + {expectedTimeColumn[1] - 69*tsdbtest.HoursInMillis, 20.0}, + {expectedTimeColumn[2] - 70*tsdbtest.HoursInMillis, 30.0}, + {expectedTimeColumn[3] - 71*tsdbtest.HoursInMillis, 40.0}, + {expectedTimeColumn[4] - 72*tsdbtest.HoursInMillis, 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_1", + Labels: labelSetLinux, + Data: []tsdbtest.DataPoint{ + {expectedTimeColumn[0], 10.0}, + {expectedTimeColumn[1], 20.0}, + {expectedTimeColumn[2], 30.0}, + {expectedTimeColumn[3], 40.0}, + {expectedTimeColumn[4], 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_2", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + {expectedTimeColumn[0], 10.0}, + {expectedTimeColumn[1], 20.0}, + // NA + {expectedTimeColumn[3], 40.0}, + {expectedTimeColumn[4], 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_1", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + // NA + {expectedTimeColumn[1], 22.0}, + {expectedTimeColumn[2], 33.0}, + // NA + {expectedTimeColumn[4], 55.0}}}, + }}) + + adapter := tsdbtest.InsertData(suite.T(), testParams) + querierV2, err := adapter.QuerierV2() + requireCtx.NoError(err, "failed to create querier") + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "cpu_0"}, {Metric: "cpu_1"}, {Metric: "cpu_2"}}, + From: suite.basicQueryTime, To: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis} + iter, err := querierV2.SelectDataFrame(params) + requireCtx.NoError(err, "failed to execute query") + + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + 
requireCtx.NoError(err) + indexCol := frame.Indices()[0] + osLabel := frame.Labels()["os"] + + nullValuesMap := frame.NullValuesMap() + requireCtx.NotNil(nullValuesMap, "null value map should not be empty") + + for i := 0; i < indexCol.Len(); i++ { + t, _ := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + requireCtx.Equal(expectedTimeColumn[i], timeMillis, "time column does not match at index %v", i) + for _, columnName := range frame.Names() { + key := fmt.Sprintf("%v-%v", columnName, osLabel) + var v interface{} + column, err := frame.Column(columnName) + requireCtx.NoError(err) + if column.DType() == frames.FloatType { + v, _ = column.FloatAt(i) + if v == math.NaN() { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + bothNaN := math.IsNaN(expectedColumns[key][i].(float64)) && math.IsNaN(v.(float64)) + if bothNaN { + continue + } + } else if column.DType() == frames.StringType { + v, _ = column.StringAt(i) + if v == "" { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + } else { + suite.Fail(fmt.Sprintf("column type is not as expected: %v", column.DType())) + } + + expectedValue := expectedColumns[key][i] + if !math.IsNaN(expectedValue.(float64)) || !math.IsNaN(v.(float64)) { + requireCtx.Equal(expectedValue, v, "column %v does not match at index %v", columnName, i) + } + } + } + } +} + +func (suite *testSelectDataframeSuite) TestSparseNumericColumnsWithNotExistingMetricDataframe() { + requireCtx := suite.Require() + labelSetLinux := utils.LabelsFromStringList("os", "linux") + labelSetWindows := utils.LabelsFromStringList("os", "windows") + expectedTimeColumn := []int64{ + suite.basicQueryTime, + suite.basicQueryTime + tsdbtest.MinuteInMillis, + suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 4*tsdbtest.MinuteInMillis} + expectedColumns := map[string][]interface{}{ + "cpu_0-linux": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + "cpu_1-linux": {10.0, 20.0, 30.0, 40.0, 50.0}, + "cpu_2-linux": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + "fake-linux": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + "cpu_0-windows": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + "cpu_1-windows": {math.NaN(), 22.0, 33.0, math.NaN(), 55.0}, + "cpu_2-windows": {10.0, 20.0, math.NaN(), 40.0, 50.0}, + "fake-windows": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{ + tsdbtest.Metric{ + Name: "cpu_0", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + {expectedTimeColumn[0] - 68*tsdbtest.HoursInMillis, 10.0}, + {expectedTimeColumn[1] - 69*tsdbtest.HoursInMillis, 20.0}, + {expectedTimeColumn[2] - 70*tsdbtest.HoursInMillis, 30.0}, + {expectedTimeColumn[3] - 71*tsdbtest.HoursInMillis, 40.0}, + {expectedTimeColumn[4] - 72*tsdbtest.HoursInMillis, 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_1", + Labels: labelSetLinux, + Data: []tsdbtest.DataPoint{ + {expectedTimeColumn[0], 10.0}, + {expectedTimeColumn[1], 20.0}, + {expectedTimeColumn[2], 30.0}, + {expectedTimeColumn[3], 40.0}, + {expectedTimeColumn[4], 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_2", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + {expectedTimeColumn[0], 10.0}, + {expectedTimeColumn[1], 20.0}, + // NA + {expectedTimeColumn[3], 
40.0}, + {expectedTimeColumn[4], 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_1", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + // NA + {expectedTimeColumn[1], 22.0}, + {expectedTimeColumn[2], 33.0}, + // NA + {expectedTimeColumn[4], 55.0}}}, + }}) + + adapter := tsdbtest.InsertData(suite.T(), testParams) + querierV2, err := adapter.QuerierV2() + requireCtx.NoError(err, "failed to create querier") + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "cpu_0"}, {Metric: "cpu_1"}, {Metric: "cpu_2"}, {Metric: "fake"}}, + From: suite.basicQueryTime, To: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis} + iter, err := querierV2.SelectDataFrame(params) + requireCtx.NoError(err, "failed to execute query") + + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + requireCtx.NoError(err) + indexCol := frame.Indices()[0] + osLabel := frame.Labels()["os"] + nullValuesMap := frame.NullValuesMap() + requireCtx.NotNil(nullValuesMap, "null value map should not be empty") + + for i := 0; i < indexCol.Len(); i++ { + t, _ := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + requireCtx.Equal(expectedTimeColumn[i], timeMillis, "time column does not match at index %d", i) + for _, columnName := range frame.Names() { + key := fmt.Sprintf("%v-%v", columnName, osLabel) + var v interface{} + column, err := frame.Column(columnName) + requireCtx.NoError(err) + if column.DType() == frames.FloatType { + v, _ = column.FloatAt(i) + if v == math.NaN() { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + + bothNaN := math.IsNaN(expectedColumns[key][i].(float64)) && math.IsNaN(v.(float64)) + if bothNaN { + continue + } + } else if column.DType() == frames.StringType { + v, _ = column.StringAt(i) + if v == "" { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + } else { + suite.Fail(fmt.Sprintf("column type is not as expected: %v", column.DType())) + } + + expectedValue := expectedColumns[key][i] + if !math.IsNaN(expectedValue.(float64)) || !math.IsNaN(v.(float64)) { + requireCtx.Equal(expectedValue, v, "column %v does not match at index %d", columnName, i) + } + } + } + } +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/downsample_query_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/downsample_query_integration_test.go new file mode 100644 index 00000000..e2064425 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/downsample_query_integration_test.go @@ -0,0 +1,183 @@ +// +build integration + +package pqueriertest + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testDownsampleSuite struct { + basicQueryTestSuite +} + +func TestDownsampleSuite(t *testing.T) { + suite.Run(t, new(testDownsampleSuite)) +} + +func (suite *testDownsampleSuite) TestDownSampleNotReturningAggrAttr() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 6*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 9*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", Step: 2 * int64(tsdbtest.MinuteInMillis), From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to execute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + labels := set.At().Labels() + suite.Require().Empty(labels.Get(aggregate.AggregateLabel)) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testDownsampleSuite) TestRawDataSinglePartitionWithDownSample() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 6*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 9*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expectedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 6*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 8*tsdbtest.MinuteInMillis, 40}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Step: 2 * int64(tsdbtest.MinuteInMillis), + From: suite.basicQueryTime, + To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to execute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetric(data, expectedData) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testDownsampleSuite) TestRawDataDownSampleMultiPartitions() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + + ingestData := []tsdbtest.DataPoint{{suite.toMillis("2018-11-18T23:40:00Z"), 10}, + {suite.toMillis("2018-11-18T23:59:00Z"), 20}, + {suite.toMillis("2018-11-19T00:20:00Z"), 30}, + {suite.toMillis("2018-11-19T02:40:00Z"), 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expectedData := []tsdbtest.DataPoint{{suite.toMillis("2018-11-18T22:00:00Z"), 10}, + {suite.toMillis("2018-11-19T00:00:00Z"), 30}, + {suite.toMillis("2018-11-19T02:00:00Z"), 40}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "cpu"}}, + Step: 2 * int64(tsdbtest.HoursInMillis), + From: suite.toMillis("2018-11-18T22:00:00Z"), + To: suite.toMillis("2018-11-19T4:00:00Z")} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetric(data, expectedData) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/get_labelsets_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/get_labelsets_integration_test.go new file mode 100644 index 00000000..0e95cba0 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/get_labelsets_integration_test.go @@ -0,0 +1,248 @@ +// +build integration + +package pqueriertest + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type getLabelSetsSuite struct { + suite.Suite + v3ioConfig *config.V3ioConfig + suiteTimestamp int64 + basicQueryTime int64 +} + +func TestGetLabelSetsSuite(t *testing.T) { + suite.Run(t, new(getLabelSetsSuite)) +} + +func (suite *getLabelSetsSuite) SetupSuite() { + v3ioConfig, err := tsdbtest.LoadV3ioConfig() + if err != nil { + suite.T().Fatalf("unable to load configuration. 
Error: %v", err) + } + + suite.v3ioConfig = v3ioConfig + suite.suiteTimestamp = time.Now().Unix() + suite.basicQueryTime, err = tsdbtest.DateStringToMillis("2018-07-21T10:00:00Z") + suite.NoError(err) +} + +func (suite *getLabelSetsSuite) SetupTest() { + suite.v3ioConfig.TablePath = fmt.Sprintf("%s-%v", suite.T().Name(), suite.suiteTimestamp) + tsdbtest.CreateTestTSDB(suite.T(), suite.v3ioConfig) +} + +func (suite *getLabelSetsSuite) TearDownTest() { + suite.v3ioConfig.TablePath = fmt.Sprintf("%s-%v", suite.T().Name(), suite.suiteTimestamp) + if !suite.T().Failed() { + tsdbtest.DeleteTSDB(suite.T(), suite.v3ioConfig) + } +} + +func (suite *getLabelSetsSuite) TestGetLabels() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels := []utils.Labels{utils.LabelsFromStringList("os", "linux", "region", "europe"), + utils.LabelsFromStringList("os", "linux", "region", "asia"), + utils.LabelsFromStringList("os", "mac", "region", "europe")} + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels[0], + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels[1], + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels[2], + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + expectedLabels := []utils.Labels{utils.LabelsFromStringList("os", "linux", "region", "europe", config.PrometheusMetricNameAttribute, "cpu"), + utils.LabelsFromStringList("os", "linux", "region", "asia", config.PrometheusMetricNameAttribute, "cpu"), + utils.LabelsFromStringList("os", "mac", "region", "europe", config.PrometheusMetricNameAttribute, "cpu")} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + labelsList, err := querierV2.GetLabelSets("cpu", "") + if err != nil { + suite.T().Fatalf("failed to get label sets, err:%v\n", err) + } + + suite.ElementsMatch(expectedLabels, labelsList, "actual label sets does not match expected") +} + +func (suite *getLabelSetsSuite) TestGetLabelsAllMetrics() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels := []utils.Labels{utils.LabelsFromStringList("os", "linux", "region", "europe"), + utils.LabelsFromStringList("os", "linux", "region", "asia"), + utils.LabelsFromStringList("os", "mac", "region", "europe")} + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels[0], + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels[1], + Data: ingestedData}, + tsdbtest.Metric{ + Name: "diskio", + Labels: labels[2], + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + expectedLabels := []utils.Labels{utils.LabelsFromStringList("os", "linux", "region", "europe", config.PrometheusMetricNameAttribute, "cpu"), + utils.LabelsFromStringList("os", "linux", "region", "asia", config.PrometheusMetricNameAttribute, "cpu"), + utils.LabelsFromStringList("os", "mac", "region", "europe", config.PrometheusMetricNameAttribute, "diskio")} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, 
"failed to create querier v2") + + labelsList, err := querierV2.GetLabelSets("", "") + if err != nil { + suite.T().Fatalf("failed to get label sets, err:%v\n", err) + } + + suite.ElementsMatch(expectedLabels, labelsList, "actual label sets does not match expected") +} + +func (suite *getLabelSetsSuite) TestGetLabelsAllSpecificMetric() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels := []utils.Labels{utils.LabelsFromStringList("os", "linux", "region", "europe"), + utils.LabelsFromStringList("os", "linux", "region", "asia"), + utils.LabelsFromStringList("os", "mac", "region", "europe")} + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels[0], + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels[1], + Data: ingestedData}, + tsdbtest.Metric{ + Name: "diskio", + Labels: labels[2], + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + expectedLabels := []utils.Labels{utils.LabelsFromStringList("os", "linux", "region", "europe", config.PrometheusMetricNameAttribute, "cpu"), + utils.LabelsFromStringList("os", "linux", "region", "asia", config.PrometheusMetricNameAttribute, "cpu")} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + labelsList, err := querierV2.GetLabelSets("cpu", "") + if err != nil { + suite.T().Fatalf("failed to get label sets, err:%v\n", err) + } + + suite.ElementsMatch(expectedLabels, labelsList, "actual label sets does not match expected") +} + +func (suite *getLabelSetsSuite) TestGetLabelsWithFilter() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels := []utils.Labels{utils.LabelsFromStringList("os", "linux", "region", "europe"), + utils.LabelsFromStringList("os", "linux", "region", "asia"), + utils.LabelsFromStringList("os", "mac", "region", "europe")} + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels[0], + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels[1], + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels[2], + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + expectedLabels := []utils.Labels{utils.LabelsFromStringList("os", "linux", "region", "europe", config.PrometheusMetricNameAttribute, "cpu"), + utils.LabelsFromStringList("os", "linux", "region", "asia", config.PrometheusMetricNameAttribute, "cpu")} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + labelsList, err := querierV2.GetLabelSets("cpu", "os=='linux'") + if err != nil { + suite.T().Fatalf("failed to get label sets, err:%v\n", err) + } + + suite.ElementsMatch(expectedLabels, labelsList, "actual label sets does not match expected") +} + +func (suite *getLabelSetsSuite) TestGetLabelsAllMetricsFrom2Partitions() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels := []utils.Labels{utils.LabelsFromStringList("os", "linux", 
"region", "europe"), + utils.LabelsFromStringList("os", "linux", "region", "asia"), + utils.LabelsFromStringList("os", "mac", "region", "europe")} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels[0], + Data: []tsdbtest.DataPoint{{suite.basicQueryTime - 4*tsdbtest.DaysInMillis, 10}}}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels[1], + Data: []tsdbtest.DataPoint{{suite.basicQueryTime - 4*tsdbtest.DaysInMillis, 10}, + {suite.basicQueryTime - 2*tsdbtest.DaysInMillis, 10}}}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels[2], + Data: []tsdbtest.DataPoint{{suite.basicQueryTime, 10}}}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + expectedLabels := []utils.Labels{ + utils.LabelsFromStringList("os", "linux", "region", "asia", config.PrometheusMetricNameAttribute, "cpu"), + utils.LabelsFromStringList("os", "mac", "region", "europe", config.PrometheusMetricNameAttribute, "cpu")} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + labelsList, err := querierV2.GetLabelSets("", "") + if err != nil { + suite.T().Fatalf("failed to get label sets, err:%v\n", err) + } + + suite.Require().ElementsMatch(expectedLabels, labelsList, "actual label sets does not match expected") +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/integration_test_basic_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/integration_test_basic_test.go new file mode 100644 index 00000000..8346aea1 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/integration_test_basic_test.go @@ -0,0 +1,86 @@ +// +build integration + +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package pqueriertest + +import ( + "fmt" + "time" + + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" +) + +type basicQueryTestSuite struct { + suite.Suite + v3ioConfig *config.V3ioConfig + suiteTimestamp int64 + basicQueryTime int64 +} + +func (suite *basicQueryTestSuite) toMillis(date string) int64 { + time, err := tsdbtest.DateStringToMillis(date) + suite.NoError(err) + return time +} + +func (suite *basicQueryTestSuite) SetupSuite() { + v3ioConfig, err := tsdbtest.LoadV3ioConfig() + if err != nil { + suite.T().Fatalf("unable to load configuration. 
Error: %v", err) + } + + suite.v3ioConfig = v3ioConfig + suite.suiteTimestamp = time.Now().Unix() + suite.basicQueryTime = suite.toMillis("2018-07-21T21:40:00Z") +} + +func (suite *basicQueryTestSuite) SetupTest() { + suite.v3ioConfig.TablePath = fmt.Sprintf("%s-%v", suite.T().Name(), suite.suiteTimestamp) + tsdbtest.CreateTestTSDB(suite.T(), suite.v3ioConfig) +} + +func (suite *basicQueryTestSuite) TearDownTest() { + suite.v3ioConfig.TablePath = fmt.Sprintf("%s-%v", suite.T().Name(), suite.suiteTimestamp) + if !suite.T().Failed() { + tsdbtest.DeleteTSDB(suite.T(), suite.v3ioConfig) + } +} + +func (suite *basicQueryTestSuite) compareSingleMetric(data []tsdbtest.DataPoint, expected []tsdbtest.DataPoint) { + for i, dataPoint := range data { + suite.Require().True(dataPoint.Equals(expected[i]), "queried data does not match expected") + } +} + +func (suite *basicQueryTestSuite) compareSingleMetricWithAggregator(data []tsdbtest.DataPoint, expected map[string][]tsdbtest.DataPoint, agg string) { + for i, dataPoint := range data { + suite.Require().True(dataPoint.Equals(expected[agg][i]), "queried data does not match expected") + } +} + +func (suite *basicQueryTestSuite) compareMultipleMetrics(data []tsdbtest.DataPoint, expected map[string]map[string][]tsdbtest.DataPoint, metricName string, aggr string) { + for i, dataPoint := range data { + suite.Require().True(dataPoint.Equals(expected[metricName][aggr][i]), "queried data does not match expected") + } +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/query_sql_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/query_sql_integration_test.go new file mode 100644 index 00000000..cb7e646f --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/query_sql_integration_test.go @@ -0,0 +1,381 @@ +// +build integration + +package pqueriertest + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testSQLSyntaxQuerySuite struct { + basicQueryTestSuite +} + +func TestSQLSyntaxQuerySuite(t *testing.T) { + suite.Run(t, new(testSQLSyntaxQuerySuite)) +} + +func (suite *testSQLSyntaxQuerySuite) TestGroupByOneLabelSinglePartition() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux", "region", "europe") + labels2 := utils.LabelsFromStringList("os", "mac", "region", "europe") + labels3 := utils.LabelsFromStringList("os", "linux", "region", "americas") + labels4 := utils.LabelsFromStringList("os", "linux", "region", "asia") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels3, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels4, + Data: ingestedData}, + }}) + 
tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string]map[string][]tsdbtest.DataPoint{ + "linux": { + "sum": {{Time: suite.basicQueryTime, Value: 30}}, + "count": {{Time: suite.basicQueryTime, Value: 3}}}, + "mac": { + "sum": {{Time: suite.basicQueryTime, Value: 10}}, + "count": {{Time: suite.basicQueryTime, Value: 1}}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum,count", + Step: 2 * 60 * 1000, + From: suite.basicQueryTime, + To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval), + GroupBy: "os"} + set, err := querierV2.Select(params) + suite.Require().NoError(err, "failed to exeute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + groupByValue := set.At().Labels().Get("os") + suite.Require().NoError(err) + suite.compareMultipleMetrics(data, expected, groupByValue, agg) + } + + suite.Require().Equal(4, seriesCount, "series count didn't match expected") +} + +func (suite *testSQLSyntaxQuerySuite) TestGroupByMultipleLabelsSinglePartition() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux", "region", "europe", "version", "1") + labels2 := utils.LabelsFromStringList("os", "linux", "region", "europe", "version", "2") + labels3 := utils.LabelsFromStringList("os", "linux", "region", "americas", "version", "3") + labels4 := utils.LabelsFromStringList("os", "mac", "region", "asia", "version", "1") + labels5 := utils.LabelsFromStringList("os", "mac", "region", "asia", "version", "2") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels3, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels4, + Data: ingestedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels5, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + groupBy := []string{"os", "region"} + expected := map[string]map[string][]tsdbtest.DataPoint{ + "linux-europe": { + "sum": {{Time: suite.basicQueryTime, Value: 20}}, + "count": {{Time: suite.basicQueryTime, Value: 2}}}, + "linux-americas": { + "sum": {{Time: suite.basicQueryTime, Value: 10}}, + "count": {{Time: suite.basicQueryTime, Value: 1}}}, + "mac-asia": { + "sum": {{Time: suite.basicQueryTime, Value: 20}}, + "count": {{Time: suite.basicQueryTime, Value: 2}}}} + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum,count", + Step: 2 * 60 * 1000, + From: suite.basicQueryTime, + To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval), + GroupBy: strings.Join(groupBy, ",")} + set, err := querierV2.Select(params) + suite.Require().NoError(err, "failed to exeute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := 
tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + var groupByValue []string + for _, label := range groupBy { + groupByValue = append(groupByValue, set.At().Labels().Get(label)) + } + labelsStr := strings.Join(groupByValue, "-") + + suite.Require().NoError(err) + suite.compareMultipleMetrics(data, expected, labelsStr, agg) + } + + suite.Require().Equal(6, seriesCount, "series count didn't match expected") +} + +func (suite *testSQLSyntaxQuerySuite) TestGroupByNotExistingLabel() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.Require().NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux", "region", "europe") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + suite.Require().NoError(err, "failed to create querier v2") + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum,count", + Step: 2 * 60 * 1000, + From: suite.basicQueryTime, + To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval), + GroupBy: "something that does not exist"} + _, err = querierV2.Select(params) + if err == nil { + suite.T().Fatalf("expected fail but continued normally") + } +} + +func (suite *testSQLSyntaxQuerySuite) TestAggregateSeriesWithAlias() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + expectedResult := 40.0 + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + aliasName := "iguaz" + params, _, _ := pquerier.ParseQuery(fmt.Sprintf("select max(cpu) as %v", aliasName)) + + params.From = suite.basicQueryTime + params.To = suite.basicQueryTime + int64(numberOfEvents*eventsInterval) + + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + assert.Equal(suite.T(), 1, len(data), "queried data does not match expected") + assert.Equal(suite.T(), expectedResult, data[0].Value, "queried data does not match expected") + + seriesName := set.At().Labels().Get(config.PrometheusMetricNameAttribute) + suite.Equal(aliasName, seriesName) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testSQLSyntaxQuerySuite) TestAggregateSeriesWildcardOnPartOfTheColumns() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestData}, + tsdbtest.Metric{ + Name: "diskio", + Labels: labels1, + Data: ingestData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + expectedResult := map[string]float64{"max(cpu)": 40, "max(diskio)": 40, "min(cpu)": 10} + + querierV2, err := adapter.QuerierV2() + suite.NoError(err, "failed to create querier v2") + + params, _, _ := pquerier.ParseQuery("select max(*), min(cpu)") + + params.From = suite.basicQueryTime + params.To = suite.basicQueryTime + int64(numberOfEvents*eventsInterval) + + set, err := querierV2.Select(params) + suite.NoError(err, "failed to exeute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + labels := set.At().Labels() + expectedKey := fmt.Sprintf("%v(%v)", labels.Get(aggregate.AggregateLabel), labels.Get(config.PrometheusMetricNameAttribute)) + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + suite.Require().Equal(1, len(data), "queried data does not match expected") + suite.Require().Equal(expectedResult[expectedKey], data[0].Value, "queried data does not 
match expected") + } + + suite.Require().Equal(len(expectedResult), seriesCount, "series count didn't match expected") +} + +func (suite *testSQLSyntaxQuerySuite) TestAggregateSeriesWildcardOnPartOfTheColumnsWithVirtualColumn() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + suite.NoError(err, "failed to create v3io adapter") + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestData}, + tsdbtest.Metric{ + Name: "diskio", + Labels: labels1, + Data: ingestData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + expectedResult := map[string]float64{"avg(cpu)": 25, "avg(diskio)": 25, "min(cpu)": 10} + + querierV2, err := adapter.QuerierV2() + suite.NoError(err, "failed to create querier v2") + + params, _, _ := pquerier.ParseQuery("select avg(*), min(cpu)") + + params.From = suite.basicQueryTime + params.To = suite.basicQueryTime + int64(numberOfEvents*eventsInterval) + + set, err := querierV2.Select(params) + suite.NoError(err, "failed to exeute query") + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + labels := set.At().Labels() + expectedKey := fmt.Sprintf("%v(%v)", labels.Get(aggregate.AggregateLabel), labels.Get(config.PrometheusMetricNameAttribute)) + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + suite.Require().Equal(1, len(data), "queried data does not match expected") + suite.Require().Equal(expectedResult[expectedKey], data[0].Value, "queried data does not match expected") + } + + suite.Require().Equal(len(expectedResult), seriesCount, "series count didn't match expected") +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/raw_query_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/raw_query_integration_test.go new file mode 100644 index 00000000..3a61864c --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/raw_query_integration_test.go @@ -0,0 +1,792 @@ +// +build integration + +package pqueriertest + +import ( + "errors" + "fmt" + "math" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testRawQuerySuite struct { + basicQueryTestSuite +} + +func TestRawQuerySuite(t *testing.T) { + suite.Run(t, new(testRawQuerySuite)) +} + +func (suite *testRawQuerySuite) TestRawDataSinglePartition() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + expectedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: expectedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: expectedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetric(data, expectedData) + } + + assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") +} + +func (suite *testRawQuerySuite) TestRawDataMultiplePartitions() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + numberOfEvents := 5 + eventsInterval := int64(tsdbtest.MinuteInMillis) + expectedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*eventsInterval, 30}, + {suite.basicQueryTime + 3*eventsInterval, 40}} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: expectedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: expectedData}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", From: suite.basicQueryTime - 8*tsdbtest.DaysInMillis, To: suite.basicQueryTime + int64(numberOfEvents)*eventsInterval} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetric(data, expectedData) + } + + assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") +} + +func (suite *testRawQuerySuite) TestFilterOnLabel() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + numberOfEvents := 5 + eventsInterval := int64(tsdbtest.MinuteInMillis) + expectedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*eventsInterval, 30}, + {suite.basicQueryTime + 3*eventsInterval, 40}} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: expectedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: expectedData}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", Filter: "os=='linux'", + From: suite.basicQueryTime - 8*tsdbtest.DaysInMillis, To: suite.basicQueryTime + int64(numberOfEvents)*eventsInterval} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetric(data, expectedData) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testRawQuerySuite) TestQueryWithBadTimeParameters() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + expectedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: expectedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: expectedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", From: suite.basicQueryTime + int64(numberOfEvents*eventsInterval), To: suite.basicQueryTime} + _, err = querierV2.Select(params) + if err == nil { + suite.T().Fatalf("expected to get error but no error was returned") + } +} + +func (suite *testRawQuerySuite) TestSelectRawDataByRequestedColumns() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := ingestedData + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "cpu"}}, + From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + for i, dataPoint := range expected { + suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") + } + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testRawQuerySuite) TestRawDataMultipleMetrics() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + metricName1 := "cpu" + metricName2 := "diskio" + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 5 + eventsInterval := int64(tsdbtest.MinuteInMillis) + ingestData1 := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*eventsInterval, 30}, + {suite.basicQueryTime + 4*eventsInterval, 40}} + ingestData2 := []tsdbtest.DataPoint{{suite.basicQueryTime - 5*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + 2*tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 3*eventsInterval, 30}, + {suite.basicQueryTime + 4*eventsInterval, 40}} + + expectedData := map[string][]tsdbtest.DataPoint{metricName1: ingestData1, metricName2: ingestData2} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: metricName1, + Labels: labels1, + Data: ingestData1}, + tsdbtest.Metric{ + Name: metricName2, + Labels: labels2, + Data: ingestData2}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: metricName1}, {Metric: metricName2}}, + From: suite.basicQueryTime - 8*tsdbtest.DaysInMillis, To: suite.basicQueryTime + int64(numberOfEvents)*eventsInterval} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + name := 
set.At().Labels().Get(config.PrometheusMetricNameAttribute) + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + for i, dataPoint := range expectedData[name] { + suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") + } + } + + assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") +} + +func (suite *testRawQuerySuite) TestDataFrameRawDataMultipleMetrics() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + metricName1 := "cpu" + metricName2 := "diskio" + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 5 + eventsInterval := int64(tsdbtest.MinuteInMillis) + expectedTimeColumn := []int64{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, suite.basicQueryTime - 5*tsdbtest.DaysInMillis, + suite.basicQueryTime + tsdbtest.MinuteInMillis, suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, suite.basicQueryTime + 4*tsdbtest.MinuteInMillis} + expectedColumns := map[string][]float64{metricName1: {10, math.NaN(), 20, 30, math.NaN(), 40}, + metricName2: {math.NaN(), 10, math.NaN(), 20, 30, 40}} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: metricName1, + Labels: labels1, + Data: []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 40}}}, + tsdbtest.Metric{ + Name: metricName2, + Labels: labels2, + Data: []tsdbtest.DataPoint{{suite.basicQueryTime - 5*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + 2*tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 40}}}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: metricName1}, {Metric: metricName2}}, + From: suite.basicQueryTime - 8*tsdbtest.DaysInMillis, To: suite.basicQueryTime + int64(numberOfEvents)*eventsInterval} + iter, err := querierV2.SelectDataFrame(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + suite.NoError(err) + indexCol := frame.Indices()[0] // in tsdb we have only one index + + for i := 0; i < indexCol.Len(); i++ { + t, _ := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + assert.Equal(suite.T(), expectedTimeColumn[i], timeMillis, "time column does not match at index %v", i) + for _, columnName := range frame.Names() { + column, err := frame.Column(columnName) + suite.NoError(err) + v, _ := column.FloatAt(i) + + expected := expectedColumns[column.Name()][i] + + // assert can not compare NaN, so we need to check it manually + if !(math.IsNaN(expected) && math.IsNaN(v)) { + assert.Equal(suite.T(), expectedColumns[column.Name()][i], v, "column %v does not match at index %v", column.Name(), i) + } + } + } + } + + 
assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testRawQuerySuite) TestQueryMultipleMetricsWithMultipleLabelSets() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + metricName1 := "cpu" + metricName2 := "diskio" + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + numberOfEvents := 5 + eventsInterval := int64(tsdbtest.MinuteInMillis) + ingestData1 := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + ingestData2 := []tsdbtest.DataPoint{{suite.basicQueryTime, 20}} + ingestData3 := []tsdbtest.DataPoint{{suite.basicQueryTime, 30}, + {suite.basicQueryTime + tsdbtest.MinuteInMillis, 40}} + + expectedData := map[string][]tsdbtest.DataPoint{fmt.Sprintf("%v-%v", metricName1, "linux"): ingestData1, + fmt.Sprintf("%v-%v", metricName2, "linux"): ingestData2, + fmt.Sprintf("%v-%v", metricName2, "mac"): ingestData3} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: metricName1, + Labels: labels1, + Data: ingestData1}, + tsdbtest.Metric{ + Name: metricName2, + Labels: labels1, + Data: ingestData2}, + tsdbtest.Metric{ + Name: metricName2, + Labels: labels2, + Data: ingestData3}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Filter: "1==1", + From: suite.basicQueryTime, To: suite.basicQueryTime + int64(numberOfEvents)*eventsInterval} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + name := set.At().Labels().Get(config.PrometheusMetricNameAttribute) + os := set.At().Labels().Get("os") + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + for i, dataPoint := range expectedData[fmt.Sprintf("%v-%v", name, os)] { + suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") + } + } + + assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") +} + +func (suite *testRawQuerySuite) TestDifferentLabelSetsInDifferentPartitions() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels2 := utils.LabelsFromStringList("os", "mac") + + ingestData2 := []tsdbtest.DataPoint{{suite.basicQueryTime - 9*tsdbtest.DaysInMillis - 1*tsdbtest.HoursInMillis, 40}, + {suite.basicQueryTime, 40}} + + expected := []tsdbtest.DataPoint{{suite.basicQueryTime, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: ingestData2}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{From: suite.basicQueryTime - 9*tsdbtest.DaysInMillis, To: suite.basicQueryTime + tsdbtest.DaysInMillis} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetric(data, expected) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testRawQuerySuite) TestDifferentMetricsInDifferentPartitions() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + + ingestData2 := []tsdbtest.DataPoint{{suite.basicQueryTime - 9*tsdbtest.DaysInMillis - 1*tsdbtest.HoursInMillis, 10}, + {suite.basicQueryTime, 40}} + + expected := []tsdbtest.DataPoint{{suite.basicQueryTime, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{ + tsdbtest.Metric{ + Name: "diskio", + Labels: labels1, + Data: ingestData2}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{From: suite.basicQueryTime - 9*tsdbtest.DaysInMillis, To: suite.basicQueryTime + tsdbtest.DaysInMillis} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + for i, dataPoint := range expected { + suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") + } + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testRawQuerySuite) TestQueryMetricDoesNotHaveData() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels := utils.LabelsFromStringList("os", "linux") + cpuData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + diskioData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels, + Data: cpuData}, + tsdbtest.Metric{ + Name: "diskio", + Labels: labels, + Data: diskioData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu, diskio", + From: suite.basicQueryTime + tsdbtest.MinuteInMillis, + To: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis} + iter, err := querierV2.SelectDataFrame(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + expectedTimeColumn := []int64{suite.basicQueryTime + tsdbtest.MinuteInMillis, + suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 3*tsdbtest.MinuteInMillis} + expectedColumns := map[string][]float64{"cpu": {20, 30, 40}, + "diskio": {math.NaN(), math.NaN(), math.NaN()}} + + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + suite.NoError(err, "failed to get frame") + indexCol := frame.Indices()[0] // in tsdb we have only one index + suite.Require().Equal(len(expectedColumns), len(frame.Names()), + "columns size does not match expected, got: %v", frame.Names()) + + for i := 0; i < indexCol.Len(); i++ { + t, _ := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + suite.Require().Equal(expectedTimeColumn[i], timeMillis, "time column does not match at index %v", i) + for _, columnName := range frame.Names() { + column, err := frame.Column(columnName) + suite.NoError(err) + v, _ := column.FloatAt(i) + + expected := expectedColumns[columnName][i] + + // assert can not compare NaN, so we need to check it manually + if !(math.IsNaN(expected) && math.IsNaN(v)) { + suite.Require().Equal(expectedColumns[column.Name()][i], v, "column %v does not match at index %v", column.Name(), i) + } + } + } + } +} + +// Regression test for IG-13690 +func (suite *testRawQuerySuite) TestQueryMultiMetricsInconsistentLabels() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels := utils.LabelsFromStringList("os", "linux") + cpuData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + diskioData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{ + tsdbtest.Metric{Name: "cpu", Labels: labels, Data: cpuData}, + tsdbtest.Metric{Name: "diskio", Data: diskioData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{ + Name: "cpu, diskio", + From: suite.basicQueryTime, + To: suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, + } + iter, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + for iter.Next() { + suite.NotNil(iter.At(), "Iterator yielded a nil series") + } +} + +func (suite *testRawQuerySuite) TestLoadPartitionsFromAttributes() { + suite.v3ioConfig.LoadPartitionsFromSchemaAttr = true + defer func() { suite.v3ioConfig.LoadPartitionsFromSchemaAttr = false }() + + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + labels2 := utils.LabelsFromStringList("os", "mac") + numberOfEvents := 5 + eventsInterval := int64(tsdbtest.MinuteInMillis) + expectedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*eventsInterval, 30}, + {suite.basicQueryTime + 3*eventsInterval, 40}} + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: expectedData}, + tsdbtest.Metric{ + Name: "cpu", + Labels: labels2, + Data: expectedData}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", From: suite.basicQueryTime - 8*tsdbtest.DaysInMillis, To: suite.basicQueryTime + int64(numberOfEvents)*eventsInterval} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + + for i := 0; i < len(expectedData); i++ { + assert.Equal(suite.T(), expectedData[i].Time, data[i].Time) + currentExpected := expectedData[i].Value + switch val := currentExpected.(type) { + case float64: + assert.Equal(suite.T(), val, data[i].Value) + case int: + assert.Equal(suite.T(), float64(val), data[i].Value) + case string: + assert.Equal(suite.T(), val, data[i].Value) + default: + assert.Error(suite.T(), errors.New("unsupported data type")) + } + } + } + + assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/server_aggregates_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/server_aggregates_integration_test.go new file mode 100644 index 
00000000..4c579fff --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/server_aggregates_integration_test.go @@ -0,0 +1,383 @@ +// +build integration + +package pqueriertest + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testServerAggregatesSuite struct { + basicQueryTestSuite +} + +func TestServerAggregatesSuite(t *testing.T) { + suite.Run(t, new(testServerAggregatesSuite)) +} + +func (suite *testServerAggregatesSuite) TestRawAggregatesSinglePartition() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": {{Time: suite.basicQueryTime - 4*tsdbtest.HoursInMillis, Value: 100}}, + "min": {{Time: suite.basicQueryTime - 4*tsdbtest.HoursInMillis, Value: 10}}, + "max": {{Time: suite.basicQueryTime - 4*tsdbtest.HoursInMillis, Value: 40}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum,max,min", + Step: 4 * tsdbtest.HoursInMillis, + From: suite.basicQueryTime - 4*tsdbtest.HoursInMillis, + To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") +} + +func (suite *testServerAggregatesSuite) TestRawAggregatesSinglePartitionNegativeValues() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, -10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), -20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, -30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, -40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": {{Time: suite.basicQueryTime - 4*tsdbtest.HoursInMillis, Value: -100}}, + "min": {{Time: suite.basicQueryTime - 4*tsdbtest.HoursInMillis, Value: -40}}, + "max": {{Time: suite.basicQueryTime - 4*tsdbtest.HoursInMillis, Value: -10}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum,max,min", + Step: 4 * tsdbtest.HoursInMillis, + From: suite.basicQueryTime - 4*tsdbtest.HoursInMillis, + To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") +} + +func (suite *testServerAggregatesSuite) TestRawAggregatesMultiPartition() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + firstStepTime := suite.basicQueryTime - 7*tsdbtest.DaysInMillis - 1*tsdbtest.HoursInMillis + secondStepTime := suite.basicQueryTime - 1*tsdbtest.HoursInMillis + + expected := map[string][]tsdbtest.DataPoint{ + "sum": {{Time: firstStepTime, Value: 10}, {Time: secondStepTime, Value: 90}}, + "min": {{Time: firstStepTime, Value: 10}, {Time: secondStepTime, Value: 20}}, + "max": {{Time: firstStepTime, Value: 10}, {Time: secondStepTime, Value: 40}}, + "sqr": {{Time: firstStepTime, Value: 100}, {Time: secondStepTime, Value: 2900}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum,max,min,sqr", + Step: 4 * tsdbtest.HoursInMillis, + From: suite.basicQueryTime - 7*tsdbtest.DaysInMillis - 1*tsdbtest.HoursInMillis, + To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testServerAggregatesSuite) TestRawAggregatesMultiPartitionNonConcreteAggregates() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime - 7*tsdbtest.DaysInMillis, 10}, + {suite.basicQueryTime - 7*tsdbtest.DaysInMillis + tsdbtest.MinuteInMillis, 12}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + firstStepTime := suite.basicQueryTime - 7*tsdbtest.DaysInMillis - 1*tsdbtest.HoursInMillis + secondStepTime := suite.basicQueryTime - 1*tsdbtest.HoursInMillis + + expected := map[string][]tsdbtest.DataPoint{"avg": {{Time: firstStepTime, Value: 11}, {Time: secondStepTime, Value: 30}}, + "stdvar": {{Time: firstStepTime, Value: 2}, {Time: secondStepTime, Value: 100}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "avg,stdvar", + Step: 4 * tsdbtest.HoursInMillis, + From: suite.basicQueryTime - 7*tsdbtest.DaysInMillis - 1*tsdbtest.HoursInMillis, + To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), len(expected), seriesCount, "series count didn't match expected") +} + +func (suite *testServerAggregatesSuite) TestSelectServerAggregatesAndRawByRequestedColumns() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": {{Time: suite.basicQueryTime - 4*tsdbtest.HoursInMillis, Value: 100}}, + "": {{suite.basicQueryTime - 4*tsdbtest.HoursInMillis, 10}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "cpu", Function: "sum"}, {Metric: "cpu", Interpolator: "next_val", InterpolationTolerance: 5 * tsdbtest.HoursInMillis}}, + Step: 4 * tsdbtest.HoursInMillis, + From: suite.basicQueryTime - 4*tsdbtest.HoursInMillis, + To: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis} + + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") +} + +func (suite *testServerAggregatesSuite) TestAggregatesWithDisabledClientAggregation() { + suite.v3ioConfig.DisableClientAggr = true + defer func() { suite.v3ioConfig.DisableClientAggr = false }() + + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + eventsInterval := 60 * 1000 + + ingestedData := []tsdbtest.DataPoint{{suite.basicQueryTime - tsdbtest.DaysInMillis, 10}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30}, + {suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, 40}} + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"avg": {{Time: suite.basicQueryTime - tsdbtest.DaysInMillis, Value: 10}, + {Time: suite.basicQueryTime - tsdbtest.HoursInMillis, Value: 30}}} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", Functions: "avg", From: suite.basicQueryTime - tsdbtest.DaysInMillis, To: suite.basicQueryTime + int64(numberOfEvents*eventsInterval)} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/variant_type_query_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/variant_type_query_integration_test.go new file mode 100644 index 00000000..9c82557e --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/variant_type_query_integration_test.go @@ -0,0 +1,284 @@ +// +build integration + +package pqueriertest + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testVariantTypeSuite struct { + basicQueryTestSuite +} + +func TestVariantTypeSuite(t *testing.T) { + suite.Run(t, new(testVariantTypeSuite)) +} + +func (suite *testVariantTypeSuite) TestVariantTypeQueryWithDataFrame() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + metricName := "log" + labels := utils.LabelsFromStringList("os", "linux", "__name__", metricName) + + dataToIngest := []string{"a", "b", "c", "d", "e"} + numberOfEvents := len(dataToIngest) + var expectedTimeColumn []int64 + for i := 0; i < numberOfEvents; i++ { + expectedTimeColumn = append(expectedTimeColumn, suite.basicQueryTime+int64(i)*tsdbtest.MinuteInMillis) + } + + appender, err := adapter.Appender() + if err != nil { + suite.T().Fatalf("failed to create v3io appender. reason: %s", err) + } + + ref, err := appender.Add(labels, expectedTimeColumn[0], dataToIngest[0]) + if err != nil { + suite.T().Fatalf("Failed to add data to the TSDB appender. 
Reason: %s", err) + } + for i := 1; i < numberOfEvents; i++ { + appender.AddFast(labels, ref, expectedTimeColumn[i], dataToIngest[i]) + } + + if _, err := appender.WaitForCompletion(0); err != nil { + suite.T().Fatalf("Failed to wait for TSDB append completion. Reason: %s", err) + } + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: metricName}}, + From: suite.basicQueryTime - tsdbtest.DaysInMillis, To: suite.basicQueryTime + tsdbtest.DaysInMillis} + iter, err := querierV2.SelectDataFrame(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + suite.NoError(err) + indexCol := frame.Indices()[0] // in tsdb we have only one index + + for i := 0; i < indexCol.Len(); i++ { + t, _ := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + assert.Equal(suite.T(), expectedTimeColumn[i], timeMillis, "time column does not match at index %v", i) + for _, columnName := range frame.Names() { + column, err := frame.Column(columnName) + suite.NoError(err) + v, _ := column.StringAt(i) + + expected := dataToIngest[i] + + assert.Equal(suite.T(), expected, v, "column %v does not match at index %v", column.Name(), i) + } + } + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testVariantTypeSuite) TestVariantTypeQueryWithSeries() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + metricName := "log" + labels := utils.LabelsFromStringList("os", "linux", "__name__", metricName) + + dataToIngest := []string{"a", "b", "c", "d", "e"} + numberOfEvents := len(dataToIngest) + var expectedTimeColumn []int64 + for i := 0; i < numberOfEvents; i++ { + expectedTimeColumn = append(expectedTimeColumn, suite.basicQueryTime+int64(i)*tsdbtest.MinuteInMillis) + } + + appender, err := adapter.Appender() + if err != nil { + suite.T().Fatalf("failed to create v3io appender. reason: %s", err) + } + + ref, err := appender.Add(labels, expectedTimeColumn[0], dataToIngest[0]) + if err != nil { + suite.T().Fatalf("Failed to add data to the TSDB appender. Reason: %s", err) + } + for i := 1; i < numberOfEvents; i++ { + appender.AddFast(labels, ref, expectedTimeColumn[i], dataToIngest[i]) + } + + if _, err := appender.WaitForCompletion(0); err != nil { + suite.T().Fatalf("Failed to wait for TSDB append completion. 
Reason: %s", err) + } + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: metricName}}, + From: suite.basicQueryTime - tsdbtest.DaysInMillis, To: suite.basicQueryTime + tsdbtest.DaysInMillis} + iter, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + var seriesCount int + for iter.Next() { + seriesCount++ + iter := iter.At().Iterator() + var i int + for iter.Next() { + t, v := iter.AtString() + assert.Equal(suite.T(), expectedTimeColumn[i], t, "time does not match at index %v", i) + assert.Equal(suite.T(), dataToIngest[i], v, "value does not match at index %v", i) + i++ + } + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testVariantTypeSuite) TestCountAggregationForVariantTypeQueryWithSeries() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. reason: %s", err) + } + + metricName := "log" + labels := utils.LabelsFromStringList("os", "linux", "__name__", metricName) + + dataToIngest := []string{"a", "b", "c", "d", "e", "f"} + numberOfEvents := len(dataToIngest) + var expectedTimeColumn []int64 + for i := 0; i < numberOfEvents; i++ { + expectedTimeColumn = append(expectedTimeColumn, suite.basicQueryTime+int64(i)*tsdbtest.MinuteInMillis) + } + + expected := map[string][]tsdbtest.DataPoint{"count": {{Time: suite.basicQueryTime - 5*tsdbtest.MinuteInMillis, Value: numberOfEvents}}} + + appender, err := adapter.Appender() + if err != nil { + suite.T().Fatalf("failed to create v3io appender. reason: %s", err) + } + + ref, err := appender.Add(labels, expectedTimeColumn[0], dataToIngest[0]) + if err != nil { + suite.T().Fatalf("Failed to add data to the TSDB appender. Reason: %s", err) + } + for i := 1; i < numberOfEvents; i++ { + appender.AddFast(labels, ref, expectedTimeColumn[i], dataToIngest[i]) + } + + if _, err := appender.WaitForCompletion(0); err != nil { + suite.T().Fatalf("Failed to wait for TSDB append completion. Reason: %s", err) + } + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{ + From: suite.basicQueryTime - tsdbtest.DaysInMillis, + To: suite.basicQueryTime + tsdbtest.DaysInMillis, + Functions: "count", + Step: 10 * tsdbtest.MinuteInMillis} + + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + suite.T().Fatal(err) + } + labels := set.At().Labels() + agg := labels.Get(aggregate.AggregateLabel) + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testVariantTypeSuite) TestVariantTypeQueryWithSeriesAlotOfData() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + metricName := "log" + labels := utils.LabelsFromStringList("os", "linux", "__name__", metricName) + + numberOfEvents := 1000 + dataToIngest := make([]tsdbtest.DataPoint, numberOfEvents) + for i := 0; i < numberOfEvents; i++ { + dataToIngest[i] = tsdbtest.DataPoint{Time: suite.basicQueryTime + int64(i)*tsdbtest.MinuteInMillis, + Value: fmt.Sprintf("%v", i)} + } + + appender, err := adapter.Appender() + if err != nil { + suite.T().Fatalf("failed to create v3io appender. reason: %s", err) + } + + ref, err := appender.Add(labels, dataToIngest[0].Time, dataToIngest[0].Value) + if err != nil { + suite.T().Fatalf("Failed to add data to the TSDB appender. Reason: %s", err) + } + for i := 1; i < numberOfEvents; i++ { + appender.AddFast(labels, ref, dataToIngest[i].Time, dataToIngest[i].Value) + } + + if _, err := appender.WaitForCompletion(0); err != nil { + suite.T().Fatalf("Failed to wait for TSDB append completion. Reason: %s", err) + } + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: metricName}}, + From: suite.basicQueryTime - tsdbtest.DaysInMillis, To: suite.basicQueryTime + tsdbtest.DaysInMillis} + iter, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + var seriesCount int + for iter.Next() { + seriesCount++ + iter := iter.At().Iterator() + var slice []tsdbtest.DataPoint + for iter.Next() { + t, v := iter.AtString() + slice = append(slice, tsdbtest.DataPoint{Time: t, Value: v}) + } + + suite.Require().Equal(dataToIngest, slice, "number of events mismatch") + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/windowed_aggregation_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/windowed_aggregation_integration_test.go new file mode 100644 index 00000000..be0a304d --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/windowed_aggregation_integration_test.go @@ -0,0 +1,478 @@ +// +build integration + +package pqueriertest + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testWindowAggregationSuite struct { + basicQueryTestSuite +} + +func TestWindowAggregationSuite(t *testing.T) { + suite.Run(t, new(testWindowAggregationSuite)) +} + +func (suite *testWindowAggregationSuite) TestClientWindowedAggregationWindowBiggerThanStep() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + + var ingestedData []tsdbtest.DataPoint + + for i := 0; i < numberOfEvents; i++ { + ingestedData = append(ingestedData, tsdbtest.DataPoint{Time: suite.basicQueryTime + int64(i)*tsdbtest.MinuteInMillis, Value: 10 * float64(i)}) + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": { + {Time: suite.basicQueryTime, Value: 0}, + {Time: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, Value: 150}, + {Time: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis, Value: 390}, + }} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum", + Step: 5 * tsdbtest.MinuteInMillis, + AggregationWindow: 6 * tsdbtest.MinuteInMillis, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testWindowAggregationSuite) TestClientWindowedAggregationWindowSmallerThanStep() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + + var ingestedData []tsdbtest.DataPoint + + for i := 0; i < numberOfEvents; i++ { + ingestedData = append(ingestedData, tsdbtest.DataPoint{Time: suite.basicQueryTime + int64(i)*tsdbtest.MinuteInMillis, Value: 10 * float64(i)}) + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": { + {Time: suite.basicQueryTime, Value: 0}, + {Time: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, Value: 120}, + {Time: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis, Value: 170}, + }} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum", + Step: 5 * tsdbtest.MinuteInMillis, + AggregationWindow: 2 * tsdbtest.MinuteInMillis, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testWindowAggregationSuite) TestClientWindowedAggregationWindowEqualToStep() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + + var ingestedData []tsdbtest.DataPoint + + for i := 0; i < numberOfEvents; i++ { + ingestedData = append(ingestedData, tsdbtest.DataPoint{Time: suite.basicQueryTime + int64(i)*tsdbtest.MinuteInMillis, Value: 10 * float64(i)}) + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": { + {Time: suite.basicQueryTime, Value: 0}, + {Time: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis, Value: 150}, + {Time: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis, Value: 300}, + }} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum", + Step: 5 * tsdbtest.MinuteInMillis, + AggregationWindow: 5 * tsdbtest.MinuteInMillis, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testWindowAggregationSuite) TestClientWindowedAggregationWindowExceedsPartition() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + + ingestedData := []tsdbtest.DataPoint{{Time: suite.toMillis("2018-07-19T23:50:00Z"), Value: 1}, + {Time: suite.toMillis("2018-07-19T23:55:00Z"), Value: 2}, + {Time: suite.toMillis("2018-07-19T23:57:00Z"), Value: 3}, + {Time: suite.toMillis("2018-07-20T00:10:00Z"), Value: 4}, + {Time: suite.toMillis("2018-07-20T00:20:00Z"), Value: 5}, + {Time: suite.toMillis("2018-07-20T00:30:00Z"), Value: 6}, + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": { + {Time: suite.toMillis("2018-07-20T00:10:00Z"), Value: 10}, + {Time: suite.toMillis("2018-07-20T00:20:00Z"), Value: 15}, + {Time: suite.toMillis("2018-07-20T00:30:00Z"), Value: 15}, + }} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum", + Step: 10 * tsdbtest.MinuteInMillis, + AggregationWindow: 30 * tsdbtest.MinuteInMillis, + From: suite.toMillis("2018-07-20T00:10:00Z"), + To: suite.toMillis("2018-07-20T00:30:00Z")} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testWindowAggregationSuite) TestServerWindowedAggregationWindowBiggerThanStep() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + + var ingestedData []tsdbtest.DataPoint + + for i := 0; i < numberOfEvents; i++ { + ingestedData = append(ingestedData, tsdbtest.DataPoint{Time: suite.basicQueryTime + int64(i)*tsdbtest.HoursInMillis, Value: 10 * float64(i)}) + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": { + {Time: suite.basicQueryTime, Value: 0}, + {Time: suite.basicQueryTime + 5*tsdbtest.HoursInMillis, Value: 150}, + {Time: suite.basicQueryTime + 10*tsdbtest.HoursInMillis, Value: 350}, + }} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum", + Step: 5 * tsdbtest.HoursInMillis, + AggregationWindow: 6 * tsdbtest.HoursInMillis, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 10*tsdbtest.HoursInMillis} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testWindowAggregationSuite) TestServerWindowedAggregationWindowEqualToStep() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + + var ingestedData []tsdbtest.DataPoint + + for i := 0; i < numberOfEvents; i++ { + ingestedData = append(ingestedData, tsdbtest.DataPoint{Time: suite.basicQueryTime + int64(i)*tsdbtest.HoursInMillis, Value: 10 * float64(i)}) + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": { + {Time: suite.basicQueryTime, Value: 0}, + {Time: suite.basicQueryTime + 5*tsdbtest.HoursInMillis, Value: 150}, + {Time: suite.basicQueryTime + 10*tsdbtest.HoursInMillis, Value: 300}, + }} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum", + Step: 5 * tsdbtest.HoursInMillis, + AggregationWindow: 5 * tsdbtest.HoursInMillis, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 10*tsdbtest.HoursInMillis} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} + +func (suite *testWindowAggregationSuite) TestServerWindowedAggregationWindowEqualToRollupInterval() { + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + if err != nil { + suite.T().Fatalf("failed to create v3io adapter. 
reason: %s", err) + } + + labels1 := utils.LabelsFromStringList("os", "linux") + numberOfEvents := 10 + + var ingestedData []tsdbtest.DataPoint + + for i := 0; i < numberOfEvents; i++ { + ingestedData = append(ingestedData, tsdbtest.DataPoint{Time: suite.basicQueryTime + int64(i)*tsdbtest.HoursInMillis, Value: 10 * float64(i)}) + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: labels1, + Data: ingestedData}, + }}) + tsdbtest.InsertData(suite.T(), testParams) + + expected := map[string][]tsdbtest.DataPoint{"sum": { + {Time: suite.basicQueryTime, Value: 0}, + {Time: suite.basicQueryTime + 1*tsdbtest.HoursInMillis, Value: 10}, + {Time: suite.basicQueryTime + 2*tsdbtest.HoursInMillis, Value: 20}, + {Time: suite.basicQueryTime + 3*tsdbtest.HoursInMillis, Value: 30}, + {Time: suite.basicQueryTime + 4*tsdbtest.HoursInMillis, Value: 40}, + {Time: suite.basicQueryTime + 5*tsdbtest.HoursInMillis, Value: 50}, + {Time: suite.basicQueryTime + 6*tsdbtest.HoursInMillis, Value: 60}, + {Time: suite.basicQueryTime + 7*tsdbtest.HoursInMillis, Value: 70}, + {Time: suite.basicQueryTime + 8*tsdbtest.HoursInMillis, Value: 80}, + {Time: suite.basicQueryTime + 9*tsdbtest.HoursInMillis, Value: 90}, + }} + + querierV2, err := adapter.QuerierV2() + if err != nil { + suite.T().Fatalf("Failed to create querier v2, err: %v", err) + } + + params := &pquerier.SelectParams{Name: "cpu", + Functions: "sum", + Step: 1 * tsdbtest.HoursInMillis, + AggregationWindow: 1 * tsdbtest.HoursInMillis, + From: suite.basicQueryTime, + To: suite.basicQueryTime + 10*tsdbtest.HoursInMillis} + set, err := querierV2.Select(params) + if err != nil { + suite.T().Fatalf("Failed to exeute query, err: %v", err) + } + + var seriesCount int + for set.Next() { + seriesCount++ + iter := set.At().Iterator() + + data, err := tsdbtest.IteratorToSlice(iter) + agg := set.At().Labels().Get(aggregate.AggregateLabel) + if err != nil { + suite.T().Fatal(err) + } + suite.compareSingleMetricWithAggregator(data, expected, agg) + } + + assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/querier.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/querier.go new file mode 100644 index 00000000..573af6a0 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/querier.go @@ -0,0 +1,363 @@ +package pquerier + +import ( + "fmt" + "math" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/nuclio/logger" + "github.com/pkg/errors" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-tsdb/internal/pkg/performance" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/partmgr" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +// Create a new Querier interface +func NewV3ioQuerier(container v3io.Container, logger logger.Logger, + cfg *config.V3ioConfig, partMngr *partmgr.PartitionManager) *V3ioQuerier { + newQuerier := V3ioQuerier{ + container: container, + logger: logger.GetChild("Querier"), + cfg: cfg, + } + newQuerier.partitionMngr = partMngr + newQuerier.performanceReporter = performance.ReporterInstanceFromConfig(cfg) + return &newQuerier +} + +type V3ioQuerier struct { + logger logger.Logger + container v3io.Container + cfg *config.V3ioConfig + partitionMngr *partmgr.PartitionManager + performanceReporter *performance.MetricReporter +} + +type SelectParams 
struct { + Name string + Functions string + From, To, Step int64 + Windows []int + Filter string + RequestedColumns []RequestedColumn + GroupBy string + AggregationWindow int64 + UseOnlyClientAggr bool + + disableAllAggr bool + disableClientAggr bool +} + +func (s *SelectParams) getRequestedColumns() ([]RequestedColumn, error) { + if err := s.validateSelectParams(); err != nil { + return nil, err + } + if s.RequestedColumns != nil { + return s.RequestedColumns, nil + } + functions := strings.Split(s.Functions, ",") + metricNames := strings.Split(s.Name, ",") + columns := make([]RequestedColumn, len(functions)*len(metricNames)) + var index int + for _, metric := range metricNames { + for _, function := range functions { + trimmed := strings.TrimSpace(function) + metricName := strings.TrimSpace(metric) + newCol := RequestedColumn{Function: trimmed, Metric: metricName, Interpolator: defaultInterpolation.String()} + columns[index] = newCol + index++ + } + } + return columns, nil +} + +func (s *SelectParams) validateSelectParams() error { + if s.UseOnlyClientAggr && s.disableClientAggr { + return errors.New("can not query, both `useOnlyClientAggr` and `disableClientAggr` flags are set") + } + + if s.RequestedColumns == nil { + functions := strings.Split(s.Functions, ",") + functionMap := make(map[string]bool, len(functions)) + for _, function := range functions { + trimmed := strings.TrimSpace(function) + if functionMap[trimmed] { + return fmt.Errorf("function '%v' was requested multiple time", trimmed) + } + functionMap[trimmed] = true + } + } else { + functionMap := make(map[string]bool, len(s.RequestedColumns)) + for _, col := range s.RequestedColumns { + trimmed := strings.TrimSpace(col.Function) + key := fmt.Sprintf("%v-%v", col.Metric, trimmed) + if functionMap[key] { + return fmt.Errorf("function '%v' for metric '%v' was requested multiple time", trimmed, col.Metric) + } + functionMap[key] = true + } + } + + return nil +} + +func (q *V3ioQuerier) SelectProm(params *SelectParams, noAggr bool) (utils.SeriesSet, error) { + params.disableAllAggr = noAggr + params.disableClientAggr = q.cfg.DisableClientAggr + iter, err := q.baseSelectQry(params, false) + if err != nil || iter == nil { + return utils.NullSeriesSet{}, err + } + + return iter, nil +} + +// Base query function +func (q *V3ioQuerier) Select(params *SelectParams) (utils.SeriesSet, error) { + params.disableAllAggr = false + params.disableClientAggr = q.cfg.DisableClientAggr + iter, err := q.baseSelectQry(params, true) + if err != nil || iter == nil { + return utils.NullSeriesSet{}, err + } + + return iter, nil +} + +func (q *V3ioQuerier) SelectDataFrame(params *SelectParams) (FrameSet, error) { + params.disableAllAggr = false + params.disableClientAggr = q.cfg.DisableClientAggr + iter, err := q.baseSelectQry(params, true) + if err != nil || iter == nil { + return nullFrameSet{}, err + } + + return iter, nil +} + +func (q *V3ioQuerier) baseSelectQry(params *SelectParams, showAggregateLabel bool) (iter *frameIterator, err error) { + if params.To < params.From { + return nil, errors.Errorf("End time '%d' is lower than start time '%d'.", params.To, params.From) + } + + err = q.partitionMngr.ReadAndUpdateSchema() + if err != nil { + return nil, errors.Wrap(err, "Failed to read/update the TSDB schema.") + } + + // If the config is set to use only client configuration override the query parameter. 
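+	// A clarifying note (inferred from the surrounding code, not upstream documentation):
+	// when UsePreciseAggregations is set in the config, UseOnlyClientAggr is forced on,
+	// so aggregates are computed on the client from raw samples instead of from the
+	// pre-aggregated server-side attributes.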
+ if q.cfg.UsePreciseAggregations { + params.UseOnlyClientAggr = true + } + + selectContext := selectQueryContext{ + container: q.container, + logger: q.logger, + workers: q.cfg.QryWorkers, + showAggregateLabel: showAggregateLabel, + v3ioConfig: q.cfg, + } + + q.logger.Debug("Select query:\n\tMetric: %s\n\tStart Time: %s (%d)\n\tEnd Time: %s (%d)\n\tFunction: %s\n\t"+ + "Step: %d\n\tFilter: %s\n\tWindows: %v\n\tDisable All Aggr: %t\n\tDisable Client Aggr: %t", + params.Name, time.Unix(params.From/1000, 0).String(), params.From, time.Unix(params.To/1000, 0).String(), + params.To, params.Functions, params.Step, + params.Filter, params.Windows, params.disableAllAggr, params.disableClientAggr) + + q.performanceReporter.WithTimer("QueryTimer", func() { + params.Filter = strings.Replace(params.Filter, config.PrometheusMetricNameAttribute, config.MetricNameAttrName, -1) + + // Get all partitions containing data relevant to the query. If the Aggregation Window parameter is specified take it in account. + parts := q.partitionMngr.PartsForRange(params.From-params.AggregationWindow, params.To, true) + if len(parts) == 0 { + return + } + + minExistingTime, maxExistingTime := parts[0].GetStartTime(), parts[len(parts)-1].GetEndTime() + if params.From < minExistingTime { + params.From = minExistingTime + } + if params.To > maxExistingTime { + params.To = maxExistingTime + } + + iter, err = selectContext.start(parts, params) + return + }) + + return +} + +// Return the current metric names +func (q *V3ioQuerier) LabelValues(labelKey string) (result []string, err error) { + q.performanceReporter.WithTimer("LabelValuesTimer", func() { + if labelKey == config.PrometheusMetricNameAttribute { + result, err = q.getMetricNames() + } else { + result, err = q.getLabelValues(labelKey) + } + }) + return +} + +// Stub +func (q *V3ioQuerier) LabelNames() ([]string, error) { + return nil, nil +} + +func (q *V3ioQuerier) getMetricNames() ([]string, error) { + input := v3io.GetItemsInput{ + Path: filepath.Join(q.cfg.TablePath, config.NamesDirectory) + "/", // Need a trailing slash + AttributeNames: []string{config.ObjectNameAttrName}, + } + + iter, err := utils.NewAsyncItemsCursor(q.container, &input, q.cfg.QryWorkers, []string{}, q.logger) + if err != nil { + return nil, err + } + + var metricNames []string + + for iter.Next() { + metricNames = append(metricNames, iter.GetField(config.ObjectNameAttrName).(string)) + } + + sort.Sort(sort.StringSlice(metricNames)) + + if iter.Err() != nil { + return nil, fmt.Errorf("failed to read metric names; err = %v", iter.Err().Error()) + } + + return metricNames, nil +} + +func (q *V3ioQuerier) getLabelValues(labelKey string) ([]string, error) { + + // Sync the partition manager (hack) + err := q.partitionMngr.ReadAndUpdateSchema() + if err != nil { + return nil, err + } + + partitionPaths := q.partitionMngr.GetPartitionsPaths() + + // If there are no partitions yet, there are no labels + if len(partitionPaths) == 0 { + return nil, nil + } + + labelValuesMap := map[string]struct{}{} + + // Get all label sets + input := v3io.GetItemsInput{ + Path: partitionPaths[0], + AttributeNames: []string{config.LabelSetAttrName}, + } + + iter, err := utils.NewAsyncItemsCursor(q.container, &input, q.cfg.QryWorkers, []string{}, q.logger) + if err != nil { + return nil, err + } + + // Iterate over the results + for iter.Next() { + labelSet := iter.GetField(config.LabelSetAttrName).(string) + + // For a label set of k1=v1,k2=v2, k2=v3, for labelKey "k2", for example, + // we want to convert the set 
to [v2, v3] + + // Split at "," to get k=v pairs + for _, label := range strings.Split(labelSet, ",") { + + // Split at "=" to get the label key and label value + splitLabel := strings.SplitN(label, "=", 2) + + // If we have two elements and the first element (the key) is equal + // to what we're looking for, save the label value in the map. + // Use a map to prevent duplicates. + if len(splitLabel) == 2 && splitLabel[0] == labelKey { + labelValuesMap[splitLabel[1]] = struct{}{} + } + } + } + + if iter.Err() != nil { + return nil, fmt.Errorf("failed to read label values, err= %v", iter.Err().Error()) + } + + var labelValues []string + for labelValue := range labelValuesMap { + labelValues = append(labelValues, labelValue) + } + + return labelValues, nil +} + +// Returns all unique labels sets we have in the data +func (q *V3ioQuerier) GetLabelSets(metric string, filter string) ([]utils.Labels, error) { + err := q.partitionMngr.ReadAndUpdateSchema() + if err != nil { + return nil, err + } + + partitionPaths := q.partitionMngr.GetPartitionsPaths() + + // If there are no partitions yet, there are no labels + if len(partitionPaths) == 0 { + return nil, nil + } + + var shardingKeys []string + if metric != "" { + shardingKeys = q.partitionMngr.PartsForRange(0, math.MaxInt64, true)[0].GetShardingKeys(metric) + } + + labelsMap := make(map[uint64]utils.Labels) + + // Get all label sets + input := v3io.GetItemsInput{ + Filter: filter, + AttributeNames: []string{config.LabelSetAttrName, config.MetricNameAttrName}, + } + + // Because of performance issues we only want to query the last two partitions + partitionsToQuery := []string{partitionPaths[len(partitionPaths)-1]} + if len(partitionPaths) > 1 { + partitionsToQuery = append(partitionsToQuery, partitionPaths[len(partitionPaths)-2]) + } + iter, err := utils.NewAsyncItemsCursorMultiplePartitions(q.container, &input, q.cfg.QryWorkers, shardingKeys, q.logger, partitionsToQuery) + if err != nil { + return nil, err + } + + // Iterate over the results + for iter.Next() { + labelSet := iter.GetField(config.LabelSetAttrName).(string) + currLabels, err := utils.LabelsFromString(labelSet) + if err != nil { + return nil, err + } + + currLabels = append(utils.LabelsFromStringList(config.PrometheusMetricNameAttribute, + iter.GetField(config.MetricNameAttrName).(string)), currLabels...) 
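+		// Deduplicate identical label sets by keying the map on the label-set hash.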
+ + labelsMap[currLabels.Hash()] = currLabels + } + + if iter.Err() != nil { + return nil, fmt.Errorf("failed to read label values, err= %v", iter.Err().Error()) + } + + labels := make([]utils.Labels, len(labelsMap)) + var counter int + for _, lset := range labelsMap { + labels[counter] = lset + counter++ + } + return labels, nil +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/select.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/select.go new file mode 100644 index 00000000..4f3cd121 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/select.go @@ -0,0 +1,628 @@ +package pquerier + +import ( + "fmt" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/nuclio/logger" + "github.com/pkg/errors" + "github.com/v3io/frames" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/partmgr" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +const defaultToleranceFactor = 2 + +type selectQueryContext struct { + logger logger.Logger + container v3io.Container + workers int + v3ioConfig *config.V3ioConfig + + queryParams *SelectParams + showAggregateLabel bool + + columnsSpec []columnMeta + columnsSpecByMetric map[string][]columnMeta + totalColumns int + isCrossSeriesAggregate bool + + // In case one of the aggregates of one of the metrics should use client side aggregates + // but the user requested to disable client aggregations - return raw data for every requested metric + forceRawQuery bool + + dataFrames map[uint64]*dataFrame + frameList []*dataFrame + requestChannels []chan *qryResults + errorChannel chan error + wg sync.WaitGroup + createDFLock sync.Mutex + stopChan chan bool + queryWG sync.WaitGroup + finalErrorChan chan error +} + +func (queryCtx *selectQueryContext) start(parts []*partmgr.DBPartition, params *SelectParams) (*frameIterator, error) { + queryCtx.dataFrames = make(map[uint64]*dataFrame) + + queryCtx.queryParams = params + var err error + queryCtx.columnsSpec, queryCtx.columnsSpecByMetric, err = queryCtx.createColumnSpecs() + if err != nil { + return nil, err + } + + // If step isn't passed (e.g., when using the console), the step is the + // difference between the end (maxt) and start (mint) times (e.g., 5 minutes) + if queryCtx.hasAtLeastOneFunction() && params.Step == 0 { + queryCtx.queryParams.Step = params.To - params.From + } + + // We query every partition for every requested metric + queries := make([]*partQuery, len(parts)*len(queryCtx.columnsSpecByMetric)) + + var queryIndex int + for _, part := range parts { + currQueries, err := queryCtx.queryPartition(part) + if err != nil { + return nil, err + } + for _, q := range currQueries { + queries[queryIndex] = q + queryIndex++ + } + } + + queryCtx.stopChan = make(chan bool, 1) + queryCtx.finalErrorChan = make(chan error, 1) + queryCtx.errorChannel = make(chan error, queryCtx.workers+len(queries)) + + err = queryCtx.startCollectors() + if err != nil { + return nil, err + } + + for _, query := range queries { + queryCtx.queryWG.Add(1) + go processQueryResults(queryCtx, query) + } + + queryCtx.queryWG.Wait() + for i := 0; i < queryCtx.workers; i++ { + close(queryCtx.requestChannels[i]) + } + + // wait for Go routines to complete + queryCtx.wg.Wait() + close(queryCtx.errorChannel) + + // return first error + err = <-queryCtx.finalErrorChan + if err != nil { + return nil, err + } + + if 
len(queryCtx.frameList) > 0 { + queryCtx.totalColumns = queryCtx.frameList[0].Len() + } + + return newFrameIterator(queryCtx) +} + +func (queryCtx *selectQueryContext) metricsAggregatesToString(metric string) (string, bool) { + var result strings.Builder + specs := queryCtx.columnsSpecByMetric[metric] + specsNum := len(specs) + if specsNum == 0 { + return "", false + } + + var requestedRawColumn bool + result.WriteString(specs[0].function.String()) + for i := 1; i < specsNum; i++ { + if specs[i].function.String() == "" { + requestedRawColumn = true + } else { + result.WriteString(",") + result.WriteString(specs[i].function.String()) + } + } + + return result.String(), requestedRawColumn && result.Len() > 0 +} + +// Query a single partition +func (queryCtx *selectQueryContext) queryPartition(partition *partmgr.DBPartition) ([]*partQuery, error) { + var queries []*partQuery + var err error + + mint, maxt := partition.GetPartitionRange() + + if queryCtx.queryParams.To < maxt { + maxt = queryCtx.queryParams.To + } + + if queryCtx.queryParams.From > mint { + mint = queryCtx.queryParams.From + } + + queryRawInsteadOfAggregates, doForceAllRawQuery := false, false + var index int + + for metric := range queryCtx.columnsSpecByMetric { + var aggregationParams *aggregate.AggregationParams + functions, requestAggregatesAndRaw := queryCtx.metricsAggregatesToString(metric) + + // Check whether there are aggregations to add and aggregates aren't disabled + if functions != "" && !queryCtx.queryParams.disableAllAggr { + + if queryCtx.queryParams.Step > partition.RollupTime() && queryCtx.queryParams.disableClientAggr { + queryCtx.queryParams.Step = partition.RollupTime() + } + + params, err := aggregate.NewAggregationParams(functions, + "v", + partition.AggrBuckets(), + queryCtx.queryParams.Step, + queryCtx.queryParams.AggregationWindow, + partition.RollupTime(), + queryCtx.queryParams.Windows, + queryCtx.queryParams.disableClientAggr, + queryCtx.v3ioConfig.UseServerAggregateCoefficient) + + if err != nil { + return nil, err + } + aggregationParams = params + + } + + newQuery := &partQuery{mint: mint, + maxt: maxt, + partition: partition, + step: queryCtx.queryParams.Step, + name: metric, + aggregatesAndChunk: requestAggregatesAndRaw} + if aggregationParams != nil { + // Cross series aggregations cannot use server side aggregates. 
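+			// Server-side aggregates are used only when the partition can satisfy the
+			// requested aggregate types, the query is not a cross-series aggregation,
+			// and client-only aggregation was not explicitly requested.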
+ newQuery.useServerSideAggregates = aggregationParams.CanAggregate(partition.AggrType()) && + !queryCtx.isCrossSeriesAggregate && + !queryCtx.queryParams.UseOnlyClientAggr + if newQuery.useServerSideAggregates || !queryCtx.queryParams.disableClientAggr { + newQuery.aggregationParams = aggregationParams + } + } + + if newQuery.useServerSideAggregates && !requestAggregatesAndRaw { + newQuery.preAggregateLabels = queryCtx.parsePreAggregateLabels(partition) + } + + queries = append(queries, newQuery) + + currentQueryShouldQueryRawInsteadOfAggregates := !newQuery.useServerSideAggregates && queryCtx.queryParams.disableClientAggr + if len(queryCtx.columnsSpecByMetric) == 1 && currentQueryShouldQueryRawInsteadOfAggregates { + doForceAllRawQuery = true + } else if index == 0 { + queryRawInsteadOfAggregates = currentQueryShouldQueryRawInsteadOfAggregates + } else if queryRawInsteadOfAggregates != currentQueryShouldQueryRawInsteadOfAggregates { + doForceAllRawQuery = true + } + index++ + } + + if doForceAllRawQuery { + queryCtx.forceRawQuery = true + for _, q := range queries { + q.aggregationParams = nil + q.useServerSideAggregates = false + err = q.getItems(queryCtx) + if err != nil { + break + } + } + } else { + for _, q := range queries { + err = q.getItems(queryCtx) + if err != nil { + break + } + } + } + + return queries, err +} + +func (queryCtx *selectQueryContext) parsePreAggregateLabels(partition *partmgr.DBPartition) []string { + if queryCtx.queryParams.GroupBy != "" { + groupByLabelSlice := strings.Split(queryCtx.queryParams.GroupBy, ",") + groupByLabelSet := make(map[string]bool) + for _, groupByLabel := range groupByLabelSlice { + groupByLabelSet[groupByLabel] = true + } + outer: + for _, preAggr := range partition.PreAggregates() { + if len(preAggr.Labels) != len(groupByLabelSet) { + continue + } + for _, label := range preAggr.Labels { + if !groupByLabelSet[label] { + continue outer + } + } + sort.Strings(groupByLabelSlice) + return groupByLabelSlice + } + } + return nil +} + +func (queryCtx *selectQueryContext) startCollectors() error { + + queryCtx.requestChannels = make([]chan *qryResults, queryCtx.workers) + + // Increment the WaitGroup counter. 
+ queryCtx.wg.Add(queryCtx.workers) + + for i := 0; i < queryCtx.workers; i++ { + newChan := make(chan *qryResults, 1000) + queryCtx.requestChannels[i] = newChan + + go func(index int) { + mainCollector(queryCtx, queryCtx.requestChannels[index]) + }(i) + } + + // Watch error channel, and signal all go routines to stop in case of an error + go func() { + // Signal all goroutines to stop when error received + err, ok := <-queryCtx.errorChannel + if ok && err != nil { + close(queryCtx.stopChan) + queryCtx.finalErrorChan <- err + } + + close(queryCtx.finalErrorChan) + return + }() + + return nil +} + +func processQueryResults(queryCtx *selectQueryContext, query *partQuery) { + defer queryCtx.queryWG.Done() + + for query.Next() { + + // read metric name + name, ok := query.GetField(config.MetricNameAttrName).(string) + if !ok { + queryCtx.errorChannel <- fmt.Errorf("could not find metric name attribute in response, res:%v", query.GetFields()) + return + } + + // read label set + lsetAttr, lok := query.GetField(config.LabelSetAttrName).(string) + if !lok { + queryCtx.errorChannel <- fmt.Errorf("could not find label set attribute in response, res:%v", query.GetFields()) + return + } + + lset, err := utils.LabelsFromString(lsetAttr) + if err != nil { + queryCtx.errorChannel <- err + return + } + + // read chunk encoding type + var encoding chunkenc.Encoding + encodingStr, ok := query.GetField(config.EncodingAttrName).(string) + // If we don't have the encoding attribute, use XOR as default. (for backwards compatibility) + if !ok { + encoding = chunkenc.EncXOR + } else { + intEncoding, err := strconv.Atoi(encodingStr) + if err != nil { + queryCtx.errorChannel <- fmt.Errorf("error parsing encoding type of chunk, got: %v, error: %v", encodingStr, err) + return + } + encoding = chunkenc.Encoding(intEncoding) + } + + results := qryResults{name: name, encoding: encoding, query: query, fields: query.GetFields()} + sort.Sort(lset) // maybe skipped if its written sorted + var hash uint64 + + if queryCtx.queryParams.GroupBy != "" { + groupByList := strings.Split(queryCtx.queryParams.GroupBy, ",") + newLset := make(utils.Labels, len(groupByList)) + for i, label := range groupByList { + trimmed := strings.TrimSpace(label) + labelValue := lset.Get(trimmed) + if labelValue != "" { + newLset[i] = utils.Label{Name: trimmed, Value: labelValue} + } else { + queryCtx.errorChannel <- fmt.Errorf("no label named %v found to group by", trimmed) + return + } + } + lset = newLset + hash = newLset.Hash() + } else if queryCtx.isCrossSeriesAggregate { + hash = uint64(0) + lset = utils.Labels{} + } else { + hash = lset.Hash() + } + + queryCtx.createDFLock.Lock() + // find or create data frame + frame, ok := queryCtx.dataFrames[hash] + if !ok { + var err error + frame, err = newDataFrame(queryCtx.columnsSpec, + queryCtx.getOrCreateTimeColumn(), + lset, + hash, + queryCtx.isRawQuery(), + queryCtx.getResultBucketsSize(), + results.IsServerAggregates(), + queryCtx.showAggregateLabel) + if err != nil { + queryCtx.errorChannel <- err + queryCtx.createDFLock.Unlock() + return + } + queryCtx.dataFrames[hash] = frame + queryCtx.frameList = append(queryCtx.frameList, frame) + } + queryCtx.createDFLock.Unlock() + + results.frame = frame + workerNum := hash & uint64(queryCtx.workers-1) + + // In case termination signal was received exit, Otherwise send query result to worker + select { + case _ = <-queryCtx.stopChan: + return + case queryCtx.requestChannels[workerNum] <- &results: + } + + } + + if query.Err() != nil { + 
queryCtx.errorChannel <- query.Err() + } +} + +func (queryCtx *selectQueryContext) createColumnSpecs() ([]columnMeta, map[string][]columnMeta, error) { + var columnsSpec []columnMeta + columnsSpecByMetric := make(map[string][]columnMeta) + requestedColumns, err := queryCtx.queryParams.getRequestedColumns() + if err != nil { + return nil, nil, err + } + + for i, col := range requestedColumns { + _, ok := columnsSpecByMetric[col.Metric] + if !ok { + columnsSpecByMetric[col.Metric] = []columnMeta{} + } + + inter, err := StrToInterpolateType(col.Interpolator) + if err != nil { + return nil, nil, err + } + + tolerance := col.InterpolationTolerance + if tolerance == 0 { + tolerance = queryCtx.queryParams.Step * defaultToleranceFactor + } + colMeta := columnMeta{metric: col.Metric, alias: col.Alias, interpolationType: inter, interpolationTolerance: tolerance} + + if col.GetFunction() != "" { + // validating that all given aggregates are either cross series or not + if col.isCrossSeries() { + if i > 0 && !queryCtx.isCrossSeriesAggregate { + return nil, nil, fmt.Errorf("can not aggregate both over time and across series aggregates") + } + queryCtx.isCrossSeriesAggregate = true + } else if queryCtx.isCrossSeriesAggregate { + return nil, nil, fmt.Errorf("can not aggregate both over time and across series aggregates") + } + aggr, err := aggregate.FromString(col.GetFunction()) + if err != nil { + return nil, nil, err + } + colMeta.function = aggr + } + columnsSpecByMetric[col.Metric] = append(columnsSpecByMetric[col.Metric], colMeta) + columnsSpec = append(columnsSpec, colMeta) + } + + // Adding hidden columns if needed + for metric, cols := range columnsSpecByMetric { + var aggregatesMask aggregate.AggrType + var aggregates []aggregate.AggrType + var metricInterpolationType InterpolationType + var metricInterpolationTolerance int64 + for _, colSpec := range cols { + aggregatesMask |= colSpec.function + aggregates = append(aggregates, colSpec.function) + + if metricInterpolationType == 0 { + if colSpec.interpolationType != 0 { + metricInterpolationType = colSpec.interpolationType + metricInterpolationTolerance = colSpec.interpolationTolerance + } + } else if colSpec.interpolationType != 0 && colSpec.interpolationType != metricInterpolationType { + return nil, nil, fmt.Errorf("multiple interpolation for the same metric are not supported, got %v and %v", + metricInterpolationType.String(), + colSpec.interpolationType.String()) + } else if metricInterpolationTolerance != colSpec.interpolationTolerance { + return nil, nil, fmt.Errorf("different interpolation tolerances for the same metric are not supported, got %v and %v", + metricInterpolationTolerance, + colSpec.interpolationTolerance) + } + } + + // Add hidden aggregates only if there the user specified aggregations + if aggregatesMask != 0 { + hiddenColumns := aggregate.GetHiddenAggregatesWithCount(aggregatesMask, aggregates) + for _, hiddenAggr := range hiddenColumns { + hiddenCol := columnMeta{metric: metric, function: hiddenAggr, isHidden: true} + columnsSpec = append(columnsSpec, hiddenCol) + columnsSpecByMetric[metric] = append(columnsSpecByMetric[metric], hiddenCol) + } + } + + // After creating all columns set their interpolation function + for i := 0; i < len(columnsSpecByMetric[metric]); i++ { + columnsSpecByMetric[metric][i].interpolationType = metricInterpolationType + columnsSpecByMetric[metric][i].interpolationTolerance = metricInterpolationTolerance + } + for i, col := range columnsSpec { + if col.metric == metric { + 
columnsSpec[i].interpolationType = metricInterpolationType + columnsSpec[i].interpolationTolerance = metricInterpolationTolerance + } + } + } + + if len(columnsSpec) == 0 { + return nil, nil, errors.Errorf("no Columns were specified for query: %v", queryCtx.queryParams) + } + return columnsSpec, columnsSpecByMetric, nil +} + +func (queryCtx *selectQueryContext) getOrCreateTimeColumn() Column { + // When querying for raw data we don't need to generate a time column since we return the raw time + if queryCtx.isRawQuery() { + return nil + } + + return queryCtx.generateTimeColumn() +} + +func (queryCtx *selectQueryContext) generateTimeColumn() Column { + columnMeta := columnMeta{metric: "time"} + timeColumn := newDataColumn("time", columnMeta, queryCtx.getResultBucketsSize(), frames.TimeType) + i := 0 + for t := queryCtx.queryParams.From; t <= queryCtx.queryParams.To; t += queryCtx.queryParams.Step { + err := timeColumn.SetDataAt(i, time.Unix(t/1000, (t%1000)*1e6)) + if err != nil { + queryCtx.logger.ErrorWith(errors.Wrap(err, fmt.Sprintf("could not set data"))) + } else { + i++ + } + } + return timeColumn +} + +func (queryCtx *selectQueryContext) isRawQuery() bool { + return (!queryCtx.hasAtLeastOneFunction() && queryCtx.queryParams.Step == 0) || + queryCtx.queryParams.disableAllAggr || + queryCtx.forceRawQuery +} + +func (queryCtx *selectQueryContext) hasAtLeastOneFunction() bool { + atLeastOneFunction := false + for _, col := range queryCtx.columnsSpec { + if col.function != 0 { + atLeastOneFunction = true + break + } + } + return atLeastOneFunction +} + +func (queryCtx *selectQueryContext) getResultBucketsSize() int { + if queryCtx.isRawQuery() { + return 0 + } + return int((queryCtx.queryParams.To-queryCtx.queryParams.From)/queryCtx.queryParams.Step + 1) +} + +// query object for a single partition (or name and partition in future optimizations) + +type partQuery struct { + partition *partmgr.DBPartition + iter utils.ItemsCursor + partIndex int + + baseTime int64 + mint, maxt int64 + attrs []string + step int64 + + chunk0Time int64 + chunkTime int64 + useServerSideAggregates bool + aggregationParams *aggregate.AggregationParams + + name string + preAggregateLabels []string + aggregatesAndChunk bool +} + +func (query *partQuery) getItems(ctx *selectQueryContext) error { + + path := query.partition.GetTablePath() + if len(query.preAggregateLabels) > 0 { + path = fmt.Sprintf("%sagg/%s/", path, strings.Join(query.preAggregateLabels, ",")) + } + + var shardingKeys []string + if query.name != "" { + shardingKeys = query.partition.GetShardingKeys(query.name) + } + attrs := []string{config.LabelSetAttrName, config.EncodingAttrName, config.MetricNameAttrName, config.MaxTimeAttrName, config.ObjectNameAttrName} + + if query.useServerSideAggregates { + query.attrs = query.aggregationParams.GetAttrNames() + } + // It is possible to request both server aggregates and raw chunk data (to downsample) for the same metric + // example: `select max(cpu), avg(cpu), cpu` with step = 1h + if !query.useServerSideAggregates || query.aggregatesAndChunk { + chunkAttr, chunk0Time := query.partition.Range2Attrs("v", query.mint-ctx.queryParams.AggregationWindow, query.maxt) + query.chunk0Time = chunk0Time + query.attrs = append(query.attrs, chunkAttr...) + } + attrs = append(attrs, query.attrs...) 
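+	// Editorial note: the assembled attribute list is fetched below through an async items
+	// cursor, which fans the GetItems request out over the metric's sharding keys (when a
+	// metric name is given) using the configured number of query workers.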
+ + ctx.logger.DebugWith("Select - GetItems", "path", path, "attr", attrs, "filter", ctx.queryParams.Filter, "name", query.name) + input := v3io.GetItemsInput{Path: path, AttributeNames: attrs, Filter: ctx.queryParams.Filter, ShardingKey: query.name} + iter, err := utils.NewAsyncItemsCursor(ctx.container, &input, ctx.workers, shardingKeys, ctx.logger) + if err != nil { + return err + } + + query.iter = iter + return nil +} + +func (query *partQuery) Next() bool { + var res bool + + res = query.iter.Next() + return res +} + +func (query *partQuery) GetField(name string) interface{} { + return query.iter.GetField(name) +} + +func (query *partQuery) GetFields() map[string]interface{} { + return query.iter.GetFields() +} + +func (query *partQuery) Err() error { + return query.iter.Err() +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/selectQueryContext_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/selectQueryContext_test.go new file mode 100644 index 00000000..0a58d205 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/selectQueryContext_test.go @@ -0,0 +1,168 @@ +// +build unit + +package pquerier + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/v3io/v3io-tsdb/pkg/aggregate" +) + +func TestCreateColumnSpecs(t *testing.T) { + testCases := []struct { + desc string + params SelectParams + expectedSpecs []columnMeta + expectedSpecsMap map[string][]columnMeta + }{ + {params: SelectParams{Name: "cpu"}, + expectedSpecs: []columnMeta{{metric: "cpu", interpolationType: interpolateNext}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", interpolationType: interpolateNext}}}}, + + {params: SelectParams{Name: "cpu", Functions: "count"}, + expectedSpecs: []columnMeta{{metric: "cpu", function: toAggr("count"), interpolationType: interpolateNext}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("count"), interpolationType: interpolateNext}}}}, + + {params: SelectParams{Name: "cpu", Functions: "avg"}, + expectedSpecs: []columnMeta{{metric: "cpu", function: toAggr("avg"), interpolationType: interpolateNext}, + {metric: "cpu", function: toAggr("count"), isHidden: true, interpolationType: interpolateNext}, + {metric: "cpu", function: toAggr("sum"), isHidden: true, interpolationType: interpolateNext}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("avg"), interpolationType: interpolateNext}, + {metric: "cpu", function: toAggr("count"), isHidden: true, interpolationType: interpolateNext}, + {metric: "cpu", function: toAggr("sum"), isHidden: true, interpolationType: interpolateNext}}}}, + + {params: SelectParams{Name: "cpu", Functions: "avg,count"}, + expectedSpecs: []columnMeta{{metric: "cpu", function: toAggr("avg"), interpolationType: interpolateNext}, + {metric: "cpu", function: toAggr("count"), interpolationType: interpolateNext}, + {metric: "cpu", function: toAggr("sum"), isHidden: true, interpolationType: interpolateNext}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("avg"), interpolationType: interpolateNext}, + {metric: "cpu", function: toAggr("count"), interpolationType: interpolateNext}, + {metric: "cpu", function: toAggr("sum"), isHidden: true, interpolationType: interpolateNext}}}}, + + {params: SelectParams{RequestedColumns: []RequestedColumn{{Metric: "cpu", Function: "count"}}}, + expectedSpecs: []columnMeta{{metric: "cpu", function: 
toAggr("count")}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("count")}}}}, + + {params: SelectParams{RequestedColumns: []RequestedColumn{{Metric: "cpu", Function: "count"}, + {Metric: "disk", Function: "count"}}}, + expectedSpecs: []columnMeta{{metric: "cpu", function: toAggr("count")}, {metric: "disk", function: toAggr("count")}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("count")}}, + "disk": {{metric: "disk", function: toAggr("count")}}}}, + + {params: SelectParams{RequestedColumns: []RequestedColumn{{Metric: "cpu", Function: "avg"}, + {Metric: "cpu", Function: "sum"}, + {Metric: "disk", Function: "count"}}}, + expectedSpecs: []columnMeta{{metric: "cpu", function: toAggr("avg")}, + {metric: "cpu", function: toAggr("sum")}, + {metric: "cpu", function: toAggr("count"), isHidden: true}, + {metric: "disk", function: toAggr("count")}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("avg")}, + {metric: "cpu", function: toAggr("sum")}, + {metric: "cpu", function: toAggr("count"), isHidden: true}}, + "disk": {{metric: "disk", function: toAggr("count")}}}}, + + {params: SelectParams{Name: "cpu,diskio"}, + expectedSpecs: []columnMeta{{metric: "cpu", interpolationType: interpolateNext}, + {metric: "diskio", interpolationType: interpolateNext}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", interpolationType: interpolateNext}}, + "diskio": {{metric: "diskio", interpolationType: interpolateNext}}}}, + + {params: SelectParams{Name: "cpu, diskio", Functions: "sum,count"}, + expectedSpecs: []columnMeta{{metric: "cpu", function: toAggr("count"), interpolationType: interpolateNext}, + {metric: "cpu", function: toAggr("sum"), interpolationType: interpolateNext}, + {metric: "diskio", function: toAggr("count"), interpolationType: interpolateNext}, + {metric: "diskio", function: toAggr("sum"), interpolationType: interpolateNext}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("sum"), interpolationType: interpolateNext}, + {metric: "cpu", function: toAggr("count"), interpolationType: interpolateNext}}, + "diskio": {{metric: "diskio", function: toAggr("sum"), interpolationType: interpolateNext}, + {metric: "diskio", function: toAggr("count"), interpolationType: interpolateNext}}}}, + + {params: SelectParams{RequestedColumns: []RequestedColumn{{Metric: "cpu", Function: "sum", Interpolator: "linear"}, + {Metric: "cpu", Function: "count", Interpolator: "linear"}}}, + expectedSpecs: []columnMeta{{metric: "cpu", function: toAggr("sum"), interpolationType: interpolateLinear}, + {metric: "cpu", function: toAggr("count"), interpolationType: interpolateLinear}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("sum"), interpolationType: interpolateLinear}, + {metric: "cpu", function: toAggr("count"), interpolationType: interpolateLinear}}}}, + + {params: SelectParams{RequestedColumns: []RequestedColumn{{Metric: "cpu", Function: "sum", Interpolator: "linear"}, + {Metric: "cpu", Function: "count"}}}, + expectedSpecs: []columnMeta{{metric: "cpu", function: toAggr("sum"), interpolationType: interpolateLinear}, + {metric: "cpu", function: toAggr("count"), interpolationType: interpolateLinear}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("sum"), interpolationType: interpolateLinear}, + {metric: "cpu", function: toAggr("count"), interpolationType: 
interpolateLinear}}}}, + + {params: SelectParams{RequestedColumns: []RequestedColumn{{Metric: "cpu", Function: "avg", Interpolator: "linear"}, + {Metric: "cpu", Function: "count"}}}, + expectedSpecs: []columnMeta{{metric: "cpu", function: toAggr("avg"), interpolationType: interpolateLinear}, + {metric: "cpu", function: toAggr("sum"), interpolationType: interpolateLinear, isHidden: true}, + {metric: "cpu", function: toAggr("count"), interpolationType: interpolateLinear}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("avg"), interpolationType: interpolateLinear}, + {metric: "cpu", function: toAggr("count"), interpolationType: interpolateLinear}, + {metric: "cpu", function: toAggr("sum"), interpolationType: interpolateLinear, isHidden: true}}}}, + + {params: SelectParams{RequestedColumns: []RequestedColumn{{Metric: "cpu", Function: "count", Interpolator: "linear"}, + {Metric: "diskio", Function: "count", Interpolator: "prev_val"}, + {Metric: "diskio", Function: "sum"}}}, + expectedSpecs: []columnMeta{{metric: "cpu", function: toAggr("count"), interpolationType: interpolateLinear}, + {metric: "diskio", function: toAggr("count"), interpolationType: interpolatePrev}, + {metric: "diskio", function: toAggr("sum"), interpolationType: interpolatePrev}}, + expectedSpecsMap: map[string][]columnMeta{"cpu": {{metric: "cpu", function: toAggr("count"), interpolationType: interpolateLinear}}, + "diskio": { + {metric: "diskio", function: toAggr("count"), interpolationType: interpolatePrev}, + {metric: "diskio", function: toAggr("sum"), interpolationType: interpolatePrev}}}}, + } + for _, test := range testCases { + t.Run(test.desc, func(t *testing.T) { + ctx := selectQueryContext{} + ctx.queryParams = &test.params + columnsSpec, columnsSpecByMetric, err := ctx.createColumnSpecs() + + if err != nil { + t.Fatal(err) + } + assert.ElementsMatch(t, test.expectedSpecs, columnsSpec) + assert.Equal(t, test.expectedSpecsMap, columnsSpecByMetric) + }) + } +} + +func TestNegativeCreateColumnSpecs(t *testing.T) { + testCases := []struct { + desc string + params SelectParams + }{ + {params: SelectParams{Name: "cpu", Functions: "count, count"}}, + + {params: SelectParams{Name: "cpu", Functions: "count, max,count"}}, + + {params: SelectParams{RequestedColumns: []RequestedColumn{{Metric: "cpu", Function: "count"}, + {Metric: "cpu", Function: "count"}}}}, + + {params: SelectParams{RequestedColumns: []RequestedColumn{{Metric: "cpu", Function: "count"}, + {Metric: "diskio", Function: "count"}, + {Metric: "cpu", Function: "count"}}}}, + + {params: SelectParams{RequestedColumns: []RequestedColumn{{Metric: "cpu", Function: "count"}, + {Metric: "diskio", Function: "count"}, + {Metric: "cpu", Function: " count "}}}}, + + {params: SelectParams{Name: "cpu", Functions: "count, count", UseOnlyClientAggr: true, disableClientAggr: true}}, + } + for _, test := range testCases { + t.Run(test.desc, func(t *testing.T) { + ctx := selectQueryContext{} + ctx.queryParams = &test.params + _, _, err := ctx.createColumnSpecs() + + if err == nil { + t.Fatal("expected error but finished normally") + } + }) + } +} + +func toAggr(str string) aggregate.AggrType { + aggr, _ := aggregate.FromString(str) + return aggr +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/series.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/series.go new file mode 100644 index 00000000..dbc67aa5 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/series.go @@ 
-0,0 +1,140 @@ +package pquerier + +import ( + "math" + "time" + + "github.com/v3io/frames" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +func NewDataFrameColumnSeries(indexColumn, dataColumn, countColumn Column, labels utils.Labels, hash uint64, showAggregateLabel bool) *DataFrameColumnSeries { + // If we need to return the Aggregate label then add it, otherwise (for example in prometheus) return labels without it + aggString := dataColumn.GetColumnSpec().function.String() + if showAggregateLabel && aggString != "" { + labels = append(labels, utils.LabelsFromStringList(aggregate.AggregateLabel, aggString)...) + } + + wantedMetricName := dataColumn.GetColumnSpec().alias + if wantedMetricName == "" { + wantedMetricName = dataColumn.GetColumnSpec().metric + } + + // The labels we get from the Dataframe are agnostic to the metric name, since there might be several metrics in one Dataframe + labels = append(labels, utils.LabelsFromStringList(config.PrometheusMetricNameAttribute, wantedMetricName)...) + s := &DataFrameColumnSeries{labels: labels, key: hash} + s.iter = &dataFrameColumnSeriesIterator{indexColumn: indexColumn, dataColumn: dataColumn, countColumn: countColumn, currentIndex: -1} + return s +} + +// This series converts two columns into a series of time-value pairs +type DataFrameColumnSeries struct { + labels utils.Labels + key uint64 + iter utils.SeriesIterator +} + +func (s *DataFrameColumnSeries) Labels() utils.Labels { + return s.labels +} +func (s *DataFrameColumnSeries) Iterator() utils.SeriesIterator { return s.iter } +func (s *DataFrameColumnSeries) GetKey() uint64 { return s.key } + +type dataFrameColumnSeriesIterator struct { + dataColumn Column + indexColumn Column + countColumn Column // Count Column is needed to filter out empty buckets + + currentIndex int + err error +} + +func (it *dataFrameColumnSeriesIterator) Seek(seekT int64) bool { + if it.currentIndex >= it.dataColumn.Len() { + return false + } + t, _ := it.At() + if t >= seekT { + return true + } + + for it.Next() { + t, _ := it.At() + if t >= seekT { + return true + } + } + + return false +} + +func (it *dataFrameColumnSeriesIterator) At() (int64, float64) { + t, err := it.indexColumn.TimeAt(it.currentIndex) + if err != nil { + it.err = err + } + v, err := it.dataColumn.FloatAt(it.currentIndex) + if err != nil { + it.err = err + } + return t.UnixNano() / int64(time.Millisecond), v +} + +func (it *dataFrameColumnSeriesIterator) AtString() (int64, string) { + t, err := it.indexColumn.TimeAt(it.currentIndex) + if err != nil { + it.err = err + } + v, err := it.dataColumn.StringAt(it.currentIndex) + if err != nil { + it.err = err + } + return t.UnixNano() / int64(time.Millisecond), v +} + +func (it *dataFrameColumnSeriesIterator) Next() bool { + if it.err != nil { + return false + } + it.currentIndex = it.getNextValidCell(it.currentIndex) + + // It is enough to only check one of the columns since we assume they are both the same size + return it.currentIndex < it.indexColumn.Len() +} + +func (it *dataFrameColumnSeriesIterator) Err() error { return it.err } + +func (it *dataFrameColumnSeriesIterator) Encoding() chunkenc.Encoding { + enc := chunkenc.EncXOR + if it.dataColumn.DType() == frames.StringType { + enc = chunkenc.EncVariant + } + return enc +} + +func (it *dataFrameColumnSeriesIterator) getNextValidCell(from int) (nextIndex int) { + for nextIndex = from + 1; nextIndex < 
it.dataColumn.Len() && !it.doesCellHasData(nextIndex); nextIndex++ { + } + return +} + +func (it *dataFrameColumnSeriesIterator) doesCellHasData(cell int) bool { + // In case we don't have a count column (for example while down sampling) check if there is a real value at `cell` + if it.countColumn == nil { + f, err := it.dataColumn.FloatAt(cell) + if err != nil { + it.err = err + return false + } + return !math.IsNaN(f) + } + val, err := it.countColumn.FloatAt(cell) + if err != nil { + it.err = err + return false + } + return val > 0 +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/sql_parser.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/sql_parser.go new file mode 100644 index 00000000..ccb5aefd --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/sql_parser.go @@ -0,0 +1,179 @@ +package pquerier + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + "github.com/v3io/v3io-tsdb/pkg/utils" + "github.com/xwb1989/sqlparser" +) + +const emptyTableName = "dual" + +// ParseQuery Parses an sql query into `tsdb.selectParams` +// Currently supported syntax: +// select - selecting multiple metrics, aggregations, interpolation functions and aliasing +// from - only one table +// where - equality, and range operators. Not supporting regex,`IS NULL`, etc.. +// group by +func ParseQuery(sql string) (*SelectParams, string, error) { + stmt, err := sqlparser.Parse(sql) + if err != nil { + return nil, "", err + } + slct, ok := stmt.(*sqlparser.Select) + if !ok { + return nil, "", fmt.Errorf("not a SELECT statement") + } + + fromTable, err := getTableName(slct) + if err != nil { + return nil, "", err + } + + selectParams := &SelectParams{} + var columns []RequestedColumn + + for _, sexpr := range slct.SelectExprs { + currCol := RequestedColumn{} + switch col := sexpr.(type) { + case *sqlparser.AliasedExpr: + if !col.As.IsEmpty() { + currCol.Alias = col.As.String() + } + + switch expr := col.Expr.(type) { + case *sqlparser.FuncExpr: + err := parseFuncExpr(expr, &currCol) + if err != nil { + return nil, "", err + } + case *sqlparser.ColName: + currCol.Metric = removeBackticks(sqlparser.String(expr.Name)) + default: + return nil, "", fmt.Errorf("unknown columns type - %T", col.Expr) + } + columns = append(columns, currCol) + case *sqlparser.StarExpr: + // Appending empty column, meaning a column template for raw data + columns = append(columns, currCol) + default: + return nil, "", fmt.Errorf("unknown SELECT column type - %T", sexpr) + } + } + if len(columns) == 0 { + return nil, "", fmt.Errorf("no columns") + } + selectParams.RequestedColumns = columns + + if slct.Where != nil { + selectParams.Filter, _ = parseFilter(strings.TrimPrefix(sqlparser.String(slct.Where), " where ")) + } + if slct.GroupBy != nil { + selectParams.GroupBy = strings.TrimPrefix(sqlparser.String(slct.GroupBy), " group by ") + } + + err = validateColumnNames(selectParams) + if err != nil { + return nil, "", err + } + + return selectParams, fromTable, nil +} + +func parseFuncExpr(expr *sqlparser.FuncExpr, destCol *RequestedColumn) error { + possibleInterpolator := removeBackticks(sqlparser.String(expr.Name)) + if _, err := StrToInterpolateType(possibleInterpolator); err == nil { + destCol.Interpolator = possibleInterpolator + numOfParameters := len(expr.Exprs) + if numOfParameters == 1 { + collName := expr.Exprs[0].(*sqlparser.AliasedExpr).Expr.(*sqlparser.ColName) + destCol.Metric = sqlparser.String(collName) + } else if numOfParameters == 2 { + collName 
:= expr.Exprs[0].(*sqlparser.AliasedExpr).Expr.(*sqlparser.ColName) + destCol.Metric = sqlparser.String(collName) + toleranceVal := expr.Exprs[1].(*sqlparser.AliasedExpr).Expr.(*sqlparser.SQLVal) + toleranceString := sqlparser.String(toleranceVal) + + // SQLVal cannot start with a number so it has to be surrounded with ticks. + // Stripping ticks + tolerance, err := utils.Str2duration(toleranceString[1 : len(toleranceString)-1]) + if err != nil { + return err + } + destCol.InterpolationTolerance = tolerance + } else { + return fmt.Errorf("unssoported number of parameters for function %v", possibleInterpolator) + } + } else { + destCol.Function = sqlparser.String(expr.Name) + + switch firstExpr := expr.Exprs[0].(type) { + case *sqlparser.AliasedExpr: + switch innerExpr := firstExpr.Expr.(type) { + case *sqlparser.ColName: + destCol.Metric = sqlparser.String(innerExpr.Name) + case *sqlparser.FuncExpr: + err := parseFuncExpr(innerExpr, destCol) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("could not parse expr")) + } + } + } + + if destCol.Metric == "" && destCol.Alias != "" { + return errors.New("cannot alias a wildcard") + } + } + + return nil +} + +func getTableName(slct *sqlparser.Select) (string, error) { + if nTables := len(slct.From); nTables != 1 { + return "", fmt.Errorf("select from multiple tables is not supported (got %d)", nTables) + } + aliased, ok := slct.From[0].(*sqlparser.AliasedTableExpr) + if !ok { + return "", fmt.Errorf("not a table select") + } + table, ok := aliased.Expr.(sqlparser.TableName) + if !ok { + return "", fmt.Errorf("not a table in FROM field") + } + + tableStr := table.Name.String() + if tableStr == emptyTableName { + return "", nil + } + return tableStr, nil +} +func parseFilter(originalFilter string) (string, error) { + return strings.Replace(originalFilter, " = ", " == ", -1), nil +} +func removeBackticks(origin string) string { + return strings.Replace(origin, "`", "", -1) +} + +func validateColumnNames(params *SelectParams) error { + names := make(map[string]bool) + requestedMetrics := make(map[string]bool) + + for _, column := range params.RequestedColumns { + columnName := column.GetColumnName() + if names[columnName] { + return fmt.Errorf("column name '%v' appears more than once in select query", columnName) + } + names[columnName] = true + requestedMetrics[column.Metric] = true + } + + for _, column := range params.RequestedColumns { + if column.Alias != "" && requestedMetrics[column.Alias] { + return fmt.Errorf("cannot use a metric name as an alias, alias: %v", column.Alias) + } + } + + return nil +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/sql_parser_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/sql_parser_test.go new file mode 100644 index 00000000..82c228a7 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/sql_parser_test.go @@ -0,0 +1,101 @@ +// +build unit + +package pquerier_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" +) + +func TestParseQuery(t *testing.T) { + testCases := []struct { + input string + output *pquerier.SelectParams + outputTable string + }{ + {input: "select columnA, columnB", + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "columnA"}, {Metric: "columnB"}}}}, + + {input: "select linear(columnA, '10m')", + output: &pquerier.SelectParams{RequestedColumns: 
[]pquerier.RequestedColumn{{Metric: "columnA", + Interpolator: "linear", + InterpolationTolerance: 10 * tsdbtest.MinuteInMillis}}}}, + + {input: "select max(prev_val(columnA)), avg(columnB)", + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "columnA", Interpolator: "prev_val", Function: "max"}, + {Metric: "columnB", Function: "avg"}}}}, + + {input: "select max(next_val(columnA)), avg(columnB)", + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "columnA", Interpolator: "next_val", Function: "max"}, + {Metric: "columnB", Function: "avg"}}}}, + + {input: "select max(prev_val(columnA, '1h')) as ahsheli, avg(columnB)", + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "columnA", + Interpolator: "prev_val", + Function: "max", + Alias: "ahsheli", + InterpolationTolerance: tsdbtest.HoursInMillis}, + {Metric: "columnB", Function: "avg"}}}}, + + {input: "select columnA where columnB = 'tal' and columnC < 'Neiman'", + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "columnA"}}, Filter: "columnB == 'tal' and columnC < 'Neiman'"}}, + + {input: "select max(columnA) group by columnB", + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "columnA", Function: "max"}}, GroupBy: "columnB"}}, + + {input: "select min(columnA) as bambi, max(linear(columnB)) as bimba where columnB >= 123 group by columnB,columnC ", + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "columnA", Function: "min", Alias: "bambi"}, + {Metric: "columnB", Function: "max", Interpolator: "linear", Alias: "bimba"}}, + Filter: "columnB >= 123", GroupBy: "columnB, columnC"}}, + + {input: "select min(columnA) from my_table where columnB >= 123", + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "columnA", Function: "min"}}, + Filter: "columnB >= 123"}, + outputTable: "my_table"}, + + {input: "select * from my_table", + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: ""}}}, + outputTable: "my_table"}, + + {input: `select * from 'my/table'`, + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: ""}}}, + outputTable: "my/table"}, + + {input: "select max(*), avg(*) from my_table", + output: &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "", Function: "max"}, {Metric: "", Function: "avg"}}}, + outputTable: "my_table"}, + } + for _, test := range testCases { + t.Run(test.input, func(tt *testing.T) { + queryParams, table, err := pquerier.ParseQuery(test.input) + if err != nil { + tt.Fatal(err) + } + + assert.Equal(tt, test.output, queryParams) + assert.Equal(tt, test.outputTable, table) + }) + } +} + +func TestNegativeParseQuery(t *testing.T) { + testCases := []struct { + input string + }{ + {input: "select columnA as something, columnB as something"}, + {input: "select avg(columnA) as something, columnB as something"}, + {input: "select avg(*) as something"}, + {input: "select avg(cpu), max(cpu) as cpu"}, + } + for _, test := range testCases { + t.Run(test.input, func(tt *testing.T) { + _, _, err := pquerier.ParseQuery(test.input) + if err == nil { + tt.Fatalf("expected error but finished successfully") + } + }) + } +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/types.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/types.go new file mode 100644 index 
00000000..e3e9b7be --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/types.go @@ -0,0 +1,105 @@ +package pquerier + +import ( + "fmt" + "strings" + + "github.com/v3io/frames" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" +) + +// data and metadata passed to the query processor workers via a channel +type qryResults struct { + frame *dataFrame + query *partQuery + name string + fields map[string]interface{} + encoding chunkenc.Encoding +} + +func (q *qryResults) IsRawQuery() bool { return q.frame.isRawSeries } + +func (q *qryResults) IsDownsample() bool { + _, ok := q.frame.columnByName[q.name] + + return ok && q.query.step != 0 +} + +func (q *qryResults) IsServerAggregates() bool { + return q.query.aggregationParams != nil && q.query.useServerSideAggregates +} + +func (q *qryResults) IsClientAggregates() bool { + return q.query.aggregationParams != nil && !q.query.useServerSideAggregates +} + +type RequestedColumn struct { + Metric string + Alias string + Function string + Interpolator string + InterpolationTolerance int64 // tolerance in Millis +} + +func (col *RequestedColumn) isCrossSeries() bool { + return strings.HasSuffix(col.Function, aggregate.CrossSeriesSuffix) +} + +// If the function is cross series, remove the suffix otherwise leave it as is +func (col *RequestedColumn) GetFunction() string { + return strings.TrimSuffix(col.Function, aggregate.CrossSeriesSuffix) +} + +func (col *RequestedColumn) GetColumnName() string { + if col.Alias != "" { + return col.Alias + } + // If no aggregations are requested (raw down sampled data) + if col.Function == "" { + return col.Metric + } + return fmt.Sprintf("%v(%v)", col.Function, col.Metric) +} + +type columnMeta struct { + metric string + alias string + function aggregate.AggrType + functionParams []interface{} + interpolationType InterpolationType + interpolationTolerance int64 + isHidden bool // real columns = columns the user has specifically requested. Hidden columns = columns needed to calculate the real columns but don't show to the user +} + +// if a user specifies he wants all metrics +func (c *columnMeta) isWildcard() bool { return c.metric == "" } + +// Concrete Column = has real data behind it, Virtual column = described as a function on top of concrete columns +func (c columnMeta) isConcrete() bool { return c.function == 0 || aggregate.IsRawAggregate(c.function) } +func (c columnMeta) getColumnName() string { + if c.alias != "" { + return c.alias + } + // If no aggregations are requested (raw down sampled data) + if c.function == 0 { + return c.metric + } + return fmt.Sprintf("%v(%v)", c.function.String(), c.metric) +} + +// SeriesSet contains a set of series. 
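+// FrameSet is the frame-oriented counterpart: it iterates over a set of frames
+// rather than over individual series.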
+type FrameSet interface { + NextFrame() bool + GetFrame() (frames.Frame, error) + Err() error +} + +// Null-frame set +type nullFrameSet struct { + err error +} + +func (s nullFrameSet) NextFrame() bool { return false } +func (s nullFrameSet) GetFrame() (frames.Frame, error) { return nil, nil } +func (s nullFrameSet) Err() error { return s.err } diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/querier/multipart.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/querier/multipart.go new file mode 100644 index 00000000..b6a2befe --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/querier/multipart.go @@ -0,0 +1,195 @@ +package querier + +import ( + "sort" + + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +func NewSetSorter(set utils.SeriesSet) (utils.SeriesSet, error) { + sorter := setSorter{} + sorter.set = set + sorter.index = -1 + + for set.Next() { + s := set.At() + i := sort.Search(len(sorter.list), func(i int) bool { return sorter.list[i].GetKey() >= s.GetKey() }) + sorter.list = append(sorter.list, nil) + copy(sorter.list[i+1:], sorter.list[i:]) + sorter.list[i] = s + } + if set.Err() != nil { + sorter.err = set.Err() + return nil, set.Err() + } + + return &sorter, nil +} + +type setSorter struct { + set utils.SeriesSet + list []utils.Series + index int + err error +} + +func (s *setSorter) Next() bool { + if s.index >= len(s.list)-1 { + return false + } + s.index++ + return true +} + +func (s *setSorter) At() utils.Series { + return s.list[s.index] +} + +func (s *setSorter) Err() error { return s.err } + +type IterSortMerger struct { + iters []utils.SeriesSet + done []bool + currKey uint64 + currInvalids []bool + currSeries []utils.Series + err error +} + +// Merge-sort multiple SeriesSets +func newIterSortMerger(sets []utils.SeriesSet) (utils.SeriesSet, error) { + newMerger := IterSortMerger{} + newMerger.iters = sets + newMerger.done = make([]bool, len(sets)) + newMerger.currInvalids = make([]bool, len(sets)) + return &newMerger, nil +} + +func (im *IterSortMerger) Next() bool { + + completed := true + keyIsSet := false + for i, iter := range im.iters { + if !im.currInvalids[i] { + im.done[i] = !iter.Next() + if iter.Err() != nil { + im.err = iter.Err() + return false + } + } + completed = completed && im.done[i] + if !im.done[i] { + key := iter.At().GetKey() + if !keyIsSet { + im.currKey = key + keyIsSet = true + } else if key < im.currKey { + im.currKey = key + } + } + } + + if completed { + return false + } + + im.currSeries = make([]utils.Series, 0, len(im.iters)) + for i, iter := range im.iters { + im.currInvalids[i] = true + if !im.done[i] { + if iter.At().GetKey() == im.currKey { + im.currInvalids[i] = false + im.currSeries = append(im.currSeries, iter.At()) + } + } + } + + return true +} + +// Return the current key and a list of iterators containing this key +func (im *IterSortMerger) At() utils.Series { + newSeries := mergedSeries{series: im.currSeries} + return &newSeries +} + +func (im *IterSortMerger) Err() error { + return im.err +} + +type mergedSeries struct { + series []utils.Series +} + +func (m *mergedSeries) Labels() utils.Labels { + return m.series[0].Labels() +} + +func (m *mergedSeries) Iterator() utils.SeriesIterator { + return newMergedSeriesIterator(m.series...) 
+} + +func (m *mergedSeries) GetKey() uint64 { + return m.series[0].GetKey() +} + +type mergedSeriesIterator struct { + series []utils.Series + i int + cur utils.SeriesIterator +} + +func newMergedSeriesIterator(s ...utils.Series) *mergedSeriesIterator { + return &mergedSeriesIterator{ + series: s, + i: 0, + cur: s[0].Iterator(), + } +} + +func (it *mergedSeriesIterator) Seek(t int64) bool { + // We just scan the merge series sequentially, as they are already + // pre-selected by time and should be accessed sequentially anyway. + for i, s := range it.series[it.i:] { + cur := s.Iterator() + if !cur.Seek(t) { + continue + } + it.cur = cur + it.i += i + return true + } + return false +} + +func (it *mergedSeriesIterator) Next() bool { + if it.cur.Next() { + return true + } + if err := it.cur.Err(); err != nil { + return false + } + if it.i == len(it.series)-1 { + return false + } + + it.i++ + it.cur = it.series[it.i].Iterator() + + return it.Next() +} + +func (it *mergedSeriesIterator) At() (t int64, v float64) { + return it.cur.At() +} + +func (it *mergedSeriesIterator) AtString() (t int64, v string) { return it.cur.AtString() } + +func (it *mergedSeriesIterator) Err() error { + return it.cur.Err() +} + +func (it *mergedSeriesIterator) Encoding() chunkenc.Encoding { + return chunkenc.EncXOR +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/querier/multipart_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/querier/multipart_test.go new file mode 100644 index 00000000..bd77a960 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/querier/multipart_test.go @@ -0,0 +1,73 @@ +// +build unit + +package querier + +import ( + "testing" + + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testIterSortMergerSuite struct { + suite.Suite +} + +type mockSeriesSet struct { + s []utils.Series + init bool +} + +func (m *mockSeriesSet) Next() bool { + if !m.init { + m.init = true + } else if len(m.s) > 1 { + m.s = m.s[1:] + } else { + return false + } + return true +} + +func (m *mockSeriesSet) At() utils.Series { + return m.s[0] +} + +func (m *mockSeriesSet) Err() error { + return nil +} + +type stubSeries uint64 + +func (stubSeries) Labels() utils.Labels { + panic("stub") +} + +func (stubSeries) Iterator() utils.SeriesIterator { + panic("stub") +} + +func (s stubSeries) GetKey() uint64 { + return uint64(s) +} + +func (suite *testIterSortMergerSuite) TestIterSortMerger() { + + s1 := []utils.Series{stubSeries(0), stubSeries(1)} + s2 := []utils.Series{stubSeries(2), stubSeries(3)} + iter, err := newIterSortMerger([]utils.SeriesSet{&mockSeriesSet{s: s1}, &mockSeriesSet{s: s2}}) + + suite.Require().Nil(err) + suite.Require().True(iter.Next()) + suite.Require().Equal(uint64(0), iter.At().GetKey()) + suite.Require().True(iter.Next()) + suite.Require().Equal(uint64(1), iter.At().GetKey()) + suite.Require().True(iter.Next()) + suite.Require().Equal(uint64(2), iter.At().GetKey()) + suite.Require().True(iter.Next()) + suite.Require().Equal(uint64(3), iter.At().GetKey()) +} + +func TestIterSortMergerSuiteSuite(t *testing.T) { + suite.Run(t, new(testIterSortMergerSuite)) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/querier/querier.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/querier/querier.go new file mode 100644 index 00000000..94879789 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/querier/querier.go @@ -0,0 +1,363 @@ +/* +Copyright 2018 Iguazio Systems Ltd. 
+ +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package querier + +import ( + "sort" + "strings" + "time" + + "github.com/nuclio/logger" + "github.com/pkg/errors" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-tsdb/internal/pkg/performance" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/partmgr" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +// Create a new Querier interface +func NewV3ioQuerier(container v3io.Container, logger logger.Logger, mint, maxt int64, + cfg *config.V3ioConfig, partMngr *partmgr.PartitionManager) *V3ioQuerier { + newQuerier := V3ioQuerier{ + container: container, + mint: mint, maxt: maxt, + logger: logger.GetChild("Querier"), + cfg: cfg, + } + newQuerier.partitionMngr = partMngr + newQuerier.performanceReporter = performance.ReporterInstanceFromConfig(cfg) + return &newQuerier +} + +type V3ioQuerier struct { + logger logger.Logger + container v3io.Container + cfg *config.V3ioConfig + mint, maxt int64 + partitionMngr *partmgr.PartitionManager + performanceReporter *performance.MetricReporter +} + +type SelectParams struct { + Name string + Functions string + Step int64 + Windows []int + Filter string + + disableAllAggr bool + disableClientAggr bool +} + +// Standard Time Series Query, return a set of series which match the condition +func (q *V3ioQuerier) Select(name, functions string, step int64, filter string) (utils.SeriesSet, error) { + + return q.selectQry(&SelectParams{ + Name: name, + Functions: functions, + Step: step, + Filter: filter, + disableClientAggr: q.cfg.DisableClientAggr, + }) + +} + +// Prometheus time-series query - return a set of time series that match the +// specified conditions +func (q *V3ioQuerier) SelectProm(name, functions string, step int64, filter string, noAggr bool) (utils.SeriesSet, error) { + + return q.selectQry(&SelectParams{ + Name: name, + Functions: functions, + Step: step, + Filter: filter, + disableClientAggr: true, + disableAllAggr: noAggr, + }) +} + +// Overlapping windows time-series query - return a set of series each with a +// list of aggregated results per window +// For example, get the last 1h, 6h, and 24h stats per metric (specify a 1h +// aggregation interval (step) of 3600*1000 (=1h), windows 1, 6, and 24, and an +// end (max) time). 
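+// An illustrative call matching the example above (the metric name, function,
+// and empty filter are hypothetical):
+//
+//	set, err := qry.SelectOverlap("cpu", "avg", 3600*1000, []int{1, 6, 24}, "")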
+func (q *V3ioQuerier) SelectOverlap(name, functions string, step int64, windows []int, filter string) (utils.SeriesSet, error) { + sort.Sort(sort.Reverse(sort.IntSlice(windows))) + + return q.selectQry(&SelectParams{ + Name: name, + Functions: functions, + Step: step, + Filter: filter, + Windows: windows, + disableClientAggr: q.cfg.DisableClientAggr, + }) +} + +// Base query function +func (q *V3ioQuerier) selectQry(params *SelectParams) (set utils.SeriesSet, err error) { + + err = q.partitionMngr.ReadAndUpdateSchema() + if err != nil { + return utils.NullSeriesSet{}, errors.Wrap(err, "Failed to read/update the TSDB schema.") + } + + set = utils.NullSeriesSet{} + + q.logger.Debug("Select query:\n\tMetric: %s\n\tStart Time: %s (%d)\n\tEnd Time: %s (%d)\n\tFunction: %s\n\t"+ + "Step: %d\n\tFilter: %s\n\tWindows: %v\n\tDisable All Aggr: %t\n\tDisable Client Aggr: %t", + params.Name, time.Unix(q.mint/1000, 0).String(), q.mint, time.Unix(q.maxt/1000, 0).String(), + q.maxt, params.Functions, params.Step, + params.Filter, params.Windows, params.disableAllAggr, params.disableClientAggr) + + q.performanceReporter.WithTimer("QueryTimer", func() { + params.Filter = strings.Replace(params.Filter, "__name__", "_name", -1) + + parts := q.partitionMngr.PartsForRange(q.mint, q.maxt, true) + if len(parts) == 0 { + return + } + + if len(parts) == 1 { + set, err = q.queryNumericPartition(parts[0], params) + return + } + + sets := make([]utils.SeriesSet, len(parts)) + for i, part := range parts { + set, err = q.queryNumericPartition(part, params) + if err != nil { + set = utils.NullSeriesSet{} + return + } + sets[i] = set + } + + // Sort each partition + /* TODO: Removed condition that applies sorting only on non range scan queries to fix bug with series coming OOO when querying multi partitions, + Need to think of a better solution. + */ + for i := 0; i < len(sets); i++ { + // TODO make it a Go routine per part + sorter, error := NewSetSorter(sets[i]) + if error != nil { + set = utils.NullSeriesSet{} + err = error + return + } + sets[i] = sorter + } + + set, err = newIterSortMerger(sets) + return + }) + + return +} + +// Query a single partition (with integer or float values) +func (q *V3ioQuerier) queryNumericPartition(partition *partmgr.DBPartition, params *SelectParams) (utils.SeriesSet, error) { + + mint, maxt := partition.GetPartitionRange() + step := params.Step + + if q.maxt < maxt { + maxt = q.maxt + } + + if q.mint > mint { + mint = q.mint + if step != 0 && step < (maxt-mint) { + // Temporary aggregation fix: if mint isn't aligned with the step, + // move it to the next step tick + mint += (maxt - mint) % step + } + } + + newSet := &V3ioSeriesSet{mint: mint, maxt: maxt, partition: partition, logger: q.logger} + + // If there are no aggregation functions and the aggregation-interval (step) + // size is greater than the stored aggregate, use the Average aggregate. + // TODO: When not using the Prometheus TSDB, we may want an avg aggregate + // for any step>0 in the Prometheus range vectors using seek, and it would + // be inefficient to use an avg aggregate. 
+ functions := params.Functions + if functions == "" && step > 0 && step >= partition.RollupTime() && partition.AggrType().HasAverage() { + functions = "avg" + } + + // Check whether there are aggregations to add and aggregates aren't disabled + if functions != "" && !params.disableAllAggr { + + // If step isn't passed (e.g., when using the console), the step is the + // difference between the end (maxt) and start (mint) times (e.g., 5 minutes) + if step == 0 { + step = maxt - mint + } + + if step > partition.RollupTime() && params.disableClientAggr { + step = partition.RollupTime() + } + + newAggrSeries, err := aggregate.NewAggregateSeries(functions, + "v", + partition.AggrBuckets(), + step, + partition.RollupTime(), + params.Windows) + + if err != nil { + return nil, err + } + + // Use aggregates if possible on the TSDB side or if client aggregation + // is enabled (Prometheus is disabled on the client side) + newSet.canAggregate = newAggrSeries.CanAggregate(partition.AggrType()) + if newSet.canAggregate || !params.disableClientAggr { + newSet.aggrSeries = newAggrSeries + newSet.interval = step + newSet.aggrIdx = newAggrSeries.NumFunctions() - 1 + newSet.overlapWin = params.Windows + newSet.noAggrLbl = params.disableClientAggr // Don't add an "Aggregate" label in Prometheus (see aggregate.AggregateLabel) + } + } + + err := newSet.getItems(partition, params.Name, params.Filter, q.container, q.cfg.QryWorkers) + + return newSet, err +} + +// Return the current metric names +func (q *V3ioQuerier) LabelValues(labelKey string) (result []string, err error) { + q.performanceReporter.WithTimer("LabelValuesTimer", func() { + if labelKey == "__name__" { + result, err = q.getMetricNames() + } else { + result, err = q.getLabelValues(labelKey) + } + }) + return +} + +func (q *V3ioQuerier) LabelNames() ([]string, error) { + return nil, nil +} + +func (q *V3ioQuerier) Close() error { + return nil +} + +func (q *V3ioQuerier) getMetricNames() ([]string, error) { + input := v3io.GetItemsInput{ + Path: q.cfg.TablePath + "/names/", + AttributeNames: []string{"__name"}, + } + + iter, err := utils.NewAsyncItemsCursor(q.container, &input, q.cfg.QryWorkers, []string{}, q.logger) + if err != nil { + return nil, err + } + + var metricNames []string + + for iter.Next() { + metricNames = append(metricNames, iter.GetField("__name").(string)) + } + + sort.Sort(sort.StringSlice(metricNames)) + + if iter.Err() != nil { + q.logger.InfoWith("Failed to read metric names; returning an empty list.", "err", iter.Err().Error()) + } + + return metricNames, nil +} + +func (q *V3ioQuerier) getLabelValues(labelKey string) ([]string, error) { + + // Sync the partition manager (hack) + err := q.partitionMngr.ReadAndUpdateSchema() + if err != nil { + return nil, err + } + + partitionPaths := q.partitionMngr.GetPartitionsPaths() + var numPartitions = len(partitionPaths) + + // If there are no partitions yet, there are no labels + if numPartitions == 0 { + return nil, nil + } + + //take the last FULL partition (unless there is only 1-2 partitions) + var partitionIndex = numPartitions - 1 + if numPartitions > 2 { + partitionIndex-- + } + + labelValuesMap := map[string]struct{}{} + + // Get all label sets + input := v3io.GetItemsInput{ + Path: partitionPaths[partitionIndex], + AttributeNames: []string{"_lset"}, + } + + iter, err := utils.NewAsyncItemsCursor(q.container, &input, q.cfg.QryWorkers, []string{}, q.logger) + if err != nil { + return nil, err + } + + // Iterate over the results + for iter.Next() { + labelSet := 
iter.GetField("_lset").(string) + + // For a label set of k1=v1,k2=v2, k2=v3, for labelKey "k2", for example, + // we want to convert the set to [v2, v3] + + // Split at "," to get k=v pairs + for _, label := range strings.Split(labelSet, ",") { + + // Split at "=" to get the label key and label value + splitLabel := strings.SplitN(label, "=", 2) + + // If we have two elements and the first element (the key) is equal + // to what we're looking for, save the label value in the map. + // Use a map to prevent duplicates. + if len(splitLabel) == 2 && splitLabel[0] == labelKey { + labelValuesMap[splitLabel[1]] = struct{}{} + } + } + } + + if iter.Err() != nil { + q.logger.InfoWith("Failed to read label values, returning empty list", "err", iter.Err().Error()) + } + + var labelValues []string + for labelValue := range labelValuesMap { + labelValues = append(labelValues, labelValue) + } + + return labelValues, nil +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/querier/series.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/querier/series.go new file mode 100644 index 00000000..6d821855 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/querier/series.go @@ -0,0 +1,310 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. 
+*/ + +package querier + +import ( + "strings" + + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +// Create a new series from chunks +func NewSeries(set *V3ioSeriesSet) utils.Series { + newSeries := V3ioSeries{set: set} + newSeries.lset = initLabels(set) + newSeries.initSeriesIter() + return &newSeries +} + +type V3ioSeries struct { + set *V3ioSeriesSet + lset utils.Labels + iter utils.SeriesIterator + hash uint64 +} + +func (s *V3ioSeries) Labels() utils.Labels { return s.lset } + +// Get the unique series key for sorting +func (s *V3ioSeries) GetKey() uint64 { + if s.hash == 0 { + val, err := s.lset.HashWithMetricName() + if err != nil { + s.set.logger.Error(err) + return 0 + } + s.hash = val + + } + return s.hash +} + +func (s *V3ioSeries) Iterator() utils.SeriesIterator { return s.iter } + +// Initialize the label set from _lset and _name attributes +func initLabels(set *V3ioSeriesSet) utils.Labels { + name, nok := set.iter.GetField("_name").(string) + if !nok { + name = "UNKNOWN" + } + lsetAttr, lok := set.iter.GetField("_lset").(string) + if !lok { + lsetAttr = "UNKNOWN" + } + if !lok || !nok { + set.logger.Error("Error in initLabels; bad field values.") + } + + lset := utils.Labels{utils.Label{Name: "__name__", Value: name}} + + splitLset := strings.Split(lsetAttr, ",") + for _, label := range splitLset { + kv := strings.Split(label, "=") + if len(kv) > 1 { + lset = append(lset, utils.Label{Name: kv[0], Value: kv[1]}) + } + } + + return lset +} + +// Initialize the series from values, metadata, and attributes +func (s *V3ioSeries) initSeriesIter() { + + maxt := s.set.maxt + maxTime := s.set.iter.GetField(config.MaxTimeAttrName) + if maxTime != nil && int64(maxTime.(int)) < maxt { + maxt = int64(maxTime.(int)) + } + + newIterator := v3ioSeriesIterator{ + mint: s.set.mint, maxt: maxt} + newIterator.chunks = []chunkenc.Chunk{} + newIterator.chunksMax = []int64{} + + // Create and initialize a chunk encoder per chunk blob + for i, attr := range s.set.attrs { + values := s.set.iter.GetField(attr) + + if values != nil { + bytes := values.([]byte) + chunk, err := chunkenc.FromData(s.set.logger, chunkenc.EncXOR, bytes, 0) + if err != nil { + s.set.logger.ErrorWith("Error reading chunk buffer", "Lset", s.lset, "err", err) + } else { + newIterator.chunks = append(newIterator.chunks, chunk) + newIterator.chunksMax = append(newIterator.chunksMax, + s.set.chunk0Time+int64(i+1)*s.set.partition.TimePerChunk()-1) + } + } + + } + + if len(newIterator.chunks) == 0 { + // If there's no data, create a null iterator + s.iter = &utils.NullSeriesIterator{} + } else { + newIterator.iter = newIterator.chunks[0].Iterator() + s.iter = &newIterator + } +} + +// Chunk-list series iterator +type v3ioSeriesIterator struct { + mint, maxt int64 // TBD per block + err error + + chunks []chunkenc.Chunk + + chunkIndex int + chunksMax []int64 + iter chunkenc.Iterator +} + +// Advance the iterator to the specified chunk and time +func (it *v3ioSeriesIterator) Seek(t int64) bool { + + // Seek time is after the item's end time (maxt) + if t > it.maxt { + return false + } + + // Seek to the first valid value after t + if t < it.mint { + t = it.mint + } + + // Check the first element + t0, _ := it.iter.At() + if t0 > it.maxt { + return false + } + if t <= t0 { + return true + } + + for { + if it.iter.Next() { + t0, _ := it.iter.At() + if t0 > it.maxt { + return false + } + if t > 
it.chunksMax[it.chunkIndex] { + // This chunk is too far behind; move to the next chunk or + // Return false if it's the last chunk + if it.chunkIndex == len(it.chunks)-1 { + return false + } + it.chunkIndex++ + it.iter = it.chunks[it.chunkIndex].Iterator() + } else if t <= t0 { + // The cursor (t0) is either on t or just passed t + return true + } + } else { + // End of chunk; move to the next chunk or return if last + if it.chunkIndex == len(it.chunks)-1 { + return false + } + it.chunkIndex++ + it.iter = it.chunks[it.chunkIndex].Iterator() + } + } +} + +// Move to the next iterator item +func (it *v3ioSeriesIterator) Next() bool { + if it.iter.Next() { + t, _ := it.iter.At() + if t < it.mint { + if !it.Seek(it.mint) { + return false + } + t, _ = it.At() + + return t <= it.maxt + } + if t <= it.maxt { + return true + } + return false + } + + if err := it.iter.Err(); err != nil { + return false + } + if it.chunkIndex == len(it.chunks)-1 { + return false + } + + it.chunkIndex++ + it.iter = it.chunks[it.chunkIndex].Iterator() + return it.Next() +} + +// Read the time and value at the current location +func (it *v3ioSeriesIterator) At() (t int64, v float64) { return it.iter.At() } + +func (it *v3ioSeriesIterator) AtString() (t int64, v string) { return it.iter.AtString() } + +func (it *v3ioSeriesIterator) Err() error { return it.iter.Err() } + +func (it *v3ioSeriesIterator) Encoding() chunkenc.Encoding { return chunkenc.EncXOR } + +// Aggregates (count, avg, sum, ..) series and iterator + +func NewAggrSeries(set *V3ioSeriesSet, aggr aggregate.AggrType) *V3ioSeries { + newSeries := V3ioSeries{set: set} + lset := initLabels(set) + if !set.noAggrLbl { + lset = append(lset, utils.Label{Name: aggregate.AggregateLabel, Value: aggr.String()}) + } + newSeries.lset = lset + + if set.nullSeries { + newSeries.iter = &utils.NullSeriesIterator{} + } else { + + // `set` - the iterator "iterates" over stateful data - it holds a + // "current" set and aggrSet. This requires copying all the required + // stateful data into the iterator (e.g., aggrSet) so that when it's + // evaluated it will hold the proper pointer. 
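+		// The index starts at -1 so that the first Next() call advances to the
+		// first populated aggregation cell.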
+ newSeries.iter = &aggrSeriesIterator{ + set: set, + aggrSet: set.aggrSet, + aggrType: aggr, + index: -1, + } + } + + return &newSeries +} + +type aggrSeriesIterator struct { + set *V3ioSeriesSet + aggrSet *aggregate.Set + aggrType aggregate.AggrType + index int + err error +} + +// Advance an iterator to the specified time (t) +func (s *aggrSeriesIterator) Seek(t int64) bool { + if t <= s.set.baseTime { + s.index = s.getNextValidCell(-1) + return true + } + + if t > s.set.baseTime+int64(s.aggrSet.GetMaxCell())*s.set.interval { + return false + } + + s.index = int((t - s.set.baseTime) / s.set.interval) + return true +} + +// Advance an iterator to the next time interval/bucket +func (s *aggrSeriesIterator) Next() bool { + // Advance the index to the next non-empty cell + s.index = s.getNextValidCell(s.index) + return s.index <= s.aggrSet.GetMaxCell() +} + +func (s *aggrSeriesIterator) getNextValidCell(from int) (nextIndex int) { + for nextIndex = from + 1; nextIndex <= s.aggrSet.GetMaxCell() && !s.aggrSet.HasData(nextIndex); nextIndex++ { + } + return +} + +// Return the time and value at the current bucket +func (s *aggrSeriesIterator) At() (t int64, v float64) { + val, _ := s.aggrSet.GetCellValue(s.aggrType, s.index) + return s.aggrSet.GetCellTime(s.set.baseTime, s.index), val +} + +func (s *aggrSeriesIterator) AtString() (t int64, v string) { return 0, "" } + +func (s *aggrSeriesIterator) Encoding() chunkenc.Encoding { return chunkenc.EncXOR } + +func (s *aggrSeriesIterator) Err() error { return s.err } diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/querier/seriesset.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/querier/seriesset.go new file mode 100644 index 00000000..ab2bbbb4 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/querier/seriesset.go @@ -0,0 +1,235 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. 
+*/ + +package querier + +import ( + "github.com/nuclio/logger" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/partmgr" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +// holds the query result set +type V3ioSeriesSet struct { + err error + logger logger.Logger + partition *partmgr.DBPartition + iter utils.ItemsCursor + mint, maxt int64 + attrs []string + chunk0Time int64 + + interval int64 + nullSeries bool + overlapWin []int + aggrSeries *aggregate.Series + aggrIdx int + canAggregate bool + currSeries utils.Series + aggrSet *aggregate.Set + noAggrLbl bool + baseTime int64 +} + +// Get relevant items and attributes from the TSDB and create an iterator +// TODO: get items per partition + merge, per partition calc attrs +func (s *V3ioSeriesSet) getItems(partition *partmgr.DBPartition, name, filter string, container v3io.Container, workers int) error { + + path := partition.GetTablePath() + shardingKeys := []string{} + if name != "" { + shardingKeys = partition.GetShardingKeys(name) + } + attrs := []string{config.LabelSetAttrName, config.OutOfOrderAttrName, "_name", config.MaxTimeAttrName} + + if s.aggrSeries != nil && s.canAggregate { + s.attrs = s.aggrSeries.GetAttrNames() + } else { + s.attrs, s.chunk0Time = s.partition.Range2Attrs("v", s.mint, s.maxt) + } + attrs = append(attrs, s.attrs...) + + s.logger.DebugWith("Select - GetItems", "path", path, "attr", attrs, "filter", filter, "name", name) + input := v3io.GetItemsInput{Path: path, AttributeNames: attrs, Filter: filter, ShardingKey: name} + iter, err := utils.NewAsyncItemsCursor(container, &input, workers, shardingKeys, s.logger) + if err != nil { + return err + } + + s.iter = iter + return nil + +} + +// Advance to the next series +func (s *V3ioSeriesSet) Next() bool { + + // Create a raw-chunks series (not aggregated) + if s.aggrSeries == nil { + if s.iter.Next() { + s.currSeries = NewSeries(s) + return true + } + return false + } + + // Create multiple aggregation series (one per aggregation function). + // The index is initialized as numfunc-1 (so the first +1 and modulo will equal 0). 
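+ // For example, with three aggregation functions (count, sum, avg), aggrIdx cycles
+ // 0 -> 1 -> 2; a new item is read from the GetItems cursor only when the index
+ // wraps back to 0, and At() returns one aggregated series per function per item.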
+ if s.aggrIdx == s.aggrSeries.NumFunctions()-1 { + // If there are no more items (from GetItems cursor), return with EOF + if !s.iter.Next() { + return false + } + + s.nullSeries = false + + if s.canAggregate { + + // Create a series from aggregation arrays (in the TSDB table) if + // the partition stores the desired aggregates + maxtUpdate := s.maxt + maxTime := s.iter.GetField(config.MaxTimeAttrName) + if maxTime != nil && int64(maxTime.(int)) < s.maxt { + maxtUpdate = int64(maxTime.(int)) + } + + start := s.partition.Time2Bucket(s.mint) + end := s.partition.Time2Bucket(s.maxt+s.interval) + 1 + + // Calculate the length of the returned array: time-range/interval + 2 + length := int((maxtUpdate-s.mint)/s.interval) + 2 + + if s.overlapWin != nil { + s.baseTime = s.maxt + } else { + s.baseTime = s.mint + } + + if length > 0 { + attrs := s.iter.GetFields() + aggrSet, err := s.aggrSeries.NewSetFromAttrs(length, start, end, s.mint, s.maxt, &attrs) + if err != nil { + s.err = err + return false + } + + s.aggrSet = aggrSet + } else { + s.nullSeries = true + } + + } else { + + // Create a series from raw chunks + s.currSeries = NewSeries(s) + + // Calculate the number of cells: (maxt-mint)/interval + 1 + numCells := (s.maxt-s.mint)/s.interval + 1 + + s.aggrSet = s.aggrSeries.NewSetFromChunks(int(numCells)) + if s.overlapWin != nil { + s.chunks2WindowedAggregates() + } else { + s.chunks2IntervalAggregates() + } + + } + } + + s.aggrIdx = (s.aggrIdx + 1) % s.aggrSeries.NumFunctions() + return true +} + +// Convert raw chunks to a fixed-interval aggregate +func (s *V3ioSeriesSet) chunks2IntervalAggregates() { + + iter := s.currSeries.Iterator() + if iter.Next() { + + s.baseTime = s.mint + + for { + t, v := iter.At() + s.aggrSet.AppendAllCells(int((t-s.baseTime)/s.interval), v) + if !iter.Next() { + break + } + } + } + + if iter.Err() != nil { + s.err = iter.Err() + return + } +} + +// Convert chunks to an overlapping-windows aggregate +func (s *V3ioSeriesSet) chunks2WindowedAggregates() { + + maxAligned := (s.maxt / s.interval) * s.interval + //baseTime := maxAligned - int64(s.overlapWin[0])*s.interval + + iter := s.currSeries.Iterator() + + if iter.Seek(s.baseTime) { + + if iter.Err() != nil { + s.err = iter.Err() + return + } + + s.baseTime = maxAligned + + for { + t, v := iter.At() + if t < maxAligned { + for i, win := range s.overlapWin { + if t > maxAligned-int64(win)*s.interval { + s.aggrSet.AppendAllCells(i, v) + } + } + } + if !iter.Next() { + s.err = iter.Err() + break + } + } + } +} + +// Return the current error +func (s *V3ioSeriesSet) Err() error { + if s.iter.Err() != nil { + return s.iter.Err() + } + return s.err +} + +// Return a series iterator +func (s *V3ioSeriesSet) At() utils.Series { + if s.aggrSeries == nil { + return s.currSeries + } + + return NewAggrSeries(s, s.aggrSeries.GetFunctions()[s.aggrIdx]) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/delete_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/delete_integration_test.go new file mode 100644 index 00000000..e3ee3fe7 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/delete_integration_test.go @@ -0,0 +1,1141 @@ +// +build integration + +package tsdb_test + +import ( + "fmt" + "math" + "path" + "strconv" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + v3io "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + . 
"github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +func timeStringToMillis(timeStr string) int64 { + ta, _ := time.Parse(time.RFC3339, timeStr) + return ta.Unix() * 1000 +} +func TestDeleteTable(t *testing.T) { + ta, _ := time.Parse(time.RFC3339, "2018-10-03T05:00:00Z") + t1 := ta.Unix() * 1000 + tb, _ := time.Parse(time.RFC3339, "2018-10-07T05:00:00Z") + t2 := tb.Unix() * 1000 + tc, _ := time.Parse(time.RFC3339, "2018-10-11T05:00:00Z") + t3 := tc.Unix() * 1000 + td, _ := time.Parse(time.RFC3339, "2022-10-11T05:00:00Z") + futurePoint := td.Unix() * 1000 + + defaultTimeMillis := timeStringToMillis("2019-07-21T00:00:00Z") + generalData := []tsdbtest.DataPoint{ + // partition 1 + // chunk a + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + // chunk b + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + // partition 2 + // chunk a + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + // chunk b + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + // partition 3 + // chunk a + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + // chunk b + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}} + partitions1StartTime := timeStringToMillis("2019-07-21T00:00:00Z") + partitions2StartTime := timeStringToMillis("2019-07-23T00:00:00Z") + partitions3StartTime := timeStringToMillis("2019-07-25T00:00:00Z") + + testCases := []struct { + desc string + deleteParams DeleteParams + data tsdbtest.TimeSeries + expectedData map[string][]tsdbtest.DataPoint + expectedPartitions []int64 + ignoreReason string + }{ + {desc: "Should delete all table by time", + deleteParams: DeleteParams{ + From: 0, + To: 9999999999999, + IgnoreErrors: true, + }, + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, + {Time: t2, Value: 333.3}, + {Time: t3, Value: 444.4}, + {Time: futurePoint, Value: 555.5}}, + }}, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": {}}, + }, + {desc: "Should delete all table by deleteAll", + deleteParams: DeleteParams{ + From: 0, + To: t1, + DeleteAll: true, + IgnoreErrors: true, + }, + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, + {Time: t2, Value: 333.3}, + {Time: t3, Value: 444.4}, + {Time: futurePoint, Value: 555.5}}, + }}, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": {}}, + }, + {desc: "Should delete whole partitions", + 
data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions2StartTime - 1, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": {{Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole partitions with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions2StartTime - 1, + Filter: "os == 'win'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-win": {{Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 
5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole partitions specific metrics", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions2StartTime - 1, + Metrics: []string{"cpu"}, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": {{Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, 
Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole partitions specific metrics with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions2StartTime - 1, + Metrics: []string{"cpu"}, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": {{Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, 
Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole chunks", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + tsdbtest.HoursInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole chunks with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + tsdbtest.HoursInMillis, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 
2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole chunks specific metrics", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + tsdbtest.HoursInMillis, + Metrics: []string{"cpu"}, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 
4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole chunks specific metrics with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + tsdbtest.HoursInMillis, + Metrics: []string{"cpu"}, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 
4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + + { + desc: "Should delete partial chunk in the start", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + 4*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{ + "cpu": { + {Time: 
defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partial chunk in the middle", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + 3*tsdbtest.MinuteInMillis, + To: partitions1StartTime + 7*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{ + "cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partial chunk in the end", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + 6*tsdbtest.MinuteInMillis, + To: partitions1StartTime + 11*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{ + 
"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partial chunk with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + 6*tsdbtest.MinuteInMillis, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 
1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + }, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partial chunk specific metrics", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + 6*tsdbtest.MinuteInMillis, + Metrics: []string{"cpu"}, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 
2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partial chunk specific metrics with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + 6*tsdbtest.MinuteInMillis, + Metrics: []string{"cpu"}, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: 
defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete mixed partitions and chunks", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + tsdbtest.HoursInMillis, + To: partitions3StartTime + 6*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions3StartTime}, + }, + { + desc: "Should delete mixed partitions and chunks with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + tsdbtest.HoursInMillis, + To: partitions3StartTime + 6*tsdbtest.MinuteInMillis, + Filter: "os == 
'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + }, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete mixed partitions and chunks specific metrics", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + tsdbtest.HoursInMillis, + To: partitions3StartTime + 6*tsdbtest.MinuteInMillis, + Metrics: []string{"cpu"}, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: 
defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete mixed partitions and chunks specific metrics with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + tsdbtest.HoursInMillis, + To: partitions3StartTime + 6*tsdbtest.MinuteInMillis, + Metrics: []string{"cpu"}, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: 
defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partially last chunk and update max time", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions3StartTime + 1*tsdbtest.HoursInMillis + 6*tsdbtest.MinuteInMillis, + To: partitions3StartTime + 1*tsdbtest.HoursInMillis + 11*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole last chunk and update max time", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, 
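// Editorial note (not part of the upstream source): judging by the From/To values
// below, this case deletes a five-minute window inside the last chunk of the last
// partition, so only the final sample (day 4, 01:10) is expected to disappear while
// the day 4, 01:05 sample survives, and the metric's max-time attribute must be
// rolled back to that remaining sample.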
+ deleteParams: DeleteParams{ + From: partitions3StartTime + 1*tsdbtest.HoursInMillis, + To: partitions3StartTime + 2*tsdbtest.HoursInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole all samples in chunk but time range is not bigger then chunk", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + 1*tsdbtest.HoursInMillis + 2*tsdbtest.MinuteInMillis, + To: partitions1StartTime + 2*tsdbtest.HoursInMillis + 11*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + } + + for _, test := range testCases { + t.Run(test.desc, func(t *testing.T) { + if test.ignoreReason != "" { + t.Skip(test.ignoreReason) + } + testDeleteTSDBCase(t, + tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptDropTableOnTearDown, + Value: !test.deleteParams.DeleteAll}, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: 
test.data}, + ), test.deleteParams, test.expectedData, test.expectedPartitions) + }) + } +} + +func getCurrentPartitions(test *testing.T, container v3io.Container, path string) []int64 { + input := &v3io.GetItemInput{Path: path + "/.schema", + AttributeNames: []string{"*"}} + res, err := container.GetItemSync(input) + if err != nil { + test.Fatal(errors.Wrap(err, "failed to get schema")) + } + output := res.Output.(*v3io.GetItemOutput) + var partitions []int64 + for part := range output.Item { + partitionsStartTime, _ := strconv.ParseInt(part[1:], 10, 64) // parse attribute and discard attribute prefix + partitions = append(partitions, partitionsStartTime) + } + return partitions +} + +func testDeleteTSDBCase(test *testing.T, testParams tsdbtest.TestParams, deleteParams DeleteParams, + expectedData map[string][]tsdbtest.DataPoint, expectedPartitions []int64) { + + adapter, teardown := tsdbtest.SetUpWithData(test, testParams) + defer teardown() + + container, err := utils.CreateContainer(adapter.GetLogger("container"), testParams.V3ioConfig(), adapter.HTTPTimeout) + if err != nil { + test.Fatalf("failed to create new container. reason: %s", err) + } + + if err := adapter.DeleteDB(deleteParams); err != nil { + test.Fatalf("Failed to delete DB. reason: %s", err) + } + + if !deleteParams.DeleteAll { + actualPartitions := getCurrentPartitions(test, container, testParams.V3ioConfig().TablePath) + assert.ElementsMatch(test, expectedPartitions, actualPartitions, "remaining partitions are not as expected") + + qry, err := adapter.QuerierV2() + if err != nil { + test.Fatalf("Failed to create Querier. reason: %v", err) + } + + params := &pquerier.SelectParams{ + From: 0, + To: math.MaxInt64, + Filter: "1==1", + } + set, err := qry.Select(params) + if err != nil { + test.Fatalf("Failed to run Select. reason: %v", err) + } + + for set.Next() { + series := set.At() + labels := series.Labels() + osLabel := labels.Get("os") + metricName := labels.Get(config.PrometheusMetricNameAttribute) + iter := series.Iterator() + if iter.Err() != nil { + test.Fatalf("Failed to query data series. reason: %v", iter.Err()) + } + + actual, err := iteratorToSlice(iter) + if err != nil { + test.Fatal(err) + } + expectedDataKey := metricName + if osLabel != "" { + expectedDataKey = fmt.Sprintf("%v-%v", expectedDataKey, osLabel) + } + + assert.ElementsMatch(test, expectedData[expectedDataKey], actual, + "result data for '%v' didn't match, expected: %v\n actual: %v\n", expectedDataKey, expectedData[expectedDataKey], actual) + + } + if set.Err() != nil { + test.Fatalf("Failed to query metric. 
reason: %v", set.Err()) + } + } else { + container, tablePath := adapter.GetContainer() + tableSchemaPath := path.Join(tablePath, config.SchemaConfigFileName) + + // Validate: schema does not exist + _, err := container.GetObjectSync(&v3io.GetObjectInput{Path: tableSchemaPath}) + if err != nil { + if utils.IsNotExistsError(err) { + // OK - expected + } else { + test.Fatalf("Failed to read a TSDB schema from '%s'.\nError: %v", tableSchemaPath, err) + } + } + + // Validate: table does not exist + _, err = container.GetObjectSync(&v3io.GetObjectInput{Path: tablePath}) + if err != nil { + if utils.IsNotExistsError(err) { + // OK - expected + } else { + test.Fatalf("Failed to read a TSDB schema from '%s'.\nError: %v", tablePath, err) + } + } + } +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/schema/schema.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/schema/schema.go new file mode 100644 index 00000000..822d7224 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/schema/schema.go @@ -0,0 +1,197 @@ +package schema + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +const ( + Version = 4 + MaxV3ioArraySize = 130000 +) + +func NewSchema(v3ioCfg *config.V3ioConfig, samplesIngestionRate, aggregationGranularity, aggregatesList string, crossLabelSets string) (*config.Schema, error) { + return newSchema( + samplesIngestionRate, + aggregationGranularity, + aggregatesList, + crossLabelSets, + v3ioCfg.MinimumChunkSize, + v3ioCfg.MaximumChunkSize, + v3ioCfg.MaximumSampleSize, + v3ioCfg.MaximumPartitionSize, + config.DefaultSampleRetentionTime, + v3ioCfg.ShardingBucketsCount) +} + +func newSchema(samplesIngestionRate, aggregationGranularity, aggregatesList string, crossLabelSets string, minChunkSize, maxChunkSize, maxSampleSize, maxPartitionSize, sampleRetention, shardingBucketsCount int) (*config.Schema, error) { + rateInHours, err := rateToHours(samplesIngestionRate) + if err != nil { + return nil, errors.Wrapf(err, "Invalid samples ingestion rate (%s).", samplesIngestionRate) + } + + chunkInterval, partitionInterval, err := calculatePartitionAndChunkInterval(rateInHours, minChunkSize, maxChunkSize, maxSampleSize, maxPartitionSize) + if err != nil { + return nil, errors.Wrap(err, "Failed to calculate the chunk interval.") + } + + aggregates, err := aggregate.RawAggregatesToStringList(aggregatesList) + if err != nil { + return nil, errors.Wrapf(err, "Failed to parse aggregates list '%s'.", aggregatesList) + } + + if err := validateAggregatesGranularity(aggregationGranularity, partitionInterval, len(aggregates) > 0); err != nil { + return nil, err + } + + parsedCrossLabelSets := aggregate.ParseCrossLabelSets(crossLabelSets) + + if len(parsedCrossLabelSets) > 0 && len(aggregates) == 0 { + return nil, errors.New("Cross label aggregations must be used in conjunction with aggregations") + } + + if len(aggregates) == 0 { + aggregates = strings.Split(config.DefaultAggregates, ",") + } + + defaultRollup := config.Rollup{ + Aggregates: aggregates, + AggregationGranularity: aggregationGranularity, + StorageClass: config.DefaultStorageClass, + SampleRetention: sampleRetention, //TODO: make configurable + LayerRetentionTime: config.DefaultLayerRetentionTime, + } + + var preaggregates []config.PreAggregate + for _, labelSet := range parsedCrossLabelSets { + preaggregate := config.PreAggregate{ 
+ Labels: labelSet, + Granularity: aggregationGranularity, + Aggregates: aggregates, + } + preaggregates = append(preaggregates, preaggregate) + } + + tableSchema := config.TableSchema{ + Version: Version, + RollupLayers: []config.Rollup{defaultRollup}, + ShardingBucketsCount: shardingBucketsCount, + PartitionerInterval: partitionInterval, + ChunckerInterval: chunkInterval, + PreAggregates: preaggregates, + } + + fields, err := aggregate.SchemaFieldFromString(aggregates, "v") + if err != nil { + return nil, errors.Wrapf(err, "Failed to create an aggregates list from string '%s'.", aggregates) + } + fields = append(fields, config.SchemaField{Name: "_name", Type: "string", Nullable: false, Items: ""}) + + partitionSchema := config.PartitionSchema{ + Version: tableSchema.Version, + Aggregates: aggregates, + AggregationGranularity: aggregationGranularity, + StorageClass: config.DefaultStorageClass, + SampleRetention: config.DefaultSampleRetentionTime, + ChunckerInterval: tableSchema.ChunckerInterval, + PartitionerInterval: tableSchema.PartitionerInterval, + } + + schema := &config.Schema{ + TableSchemaInfo: tableSchema, + PartitionSchemaInfo: partitionSchema, + Partitions: []*config.Partition{}, + Fields: fields, + } + + return schema, nil +} + +func calculatePartitionAndChunkInterval(rateInHours, minChunkSize, maxChunkSize, maxSampleSize, maxPartitionSize int) (string, string, error) { + maxNumberOfEventsPerChunk := maxChunkSize / maxSampleSize + minNumberOfEventsPerChunk := minChunkSize / maxSampleSize + + chunkInterval := maxNumberOfEventsPerChunk / rateInHours + if chunkInterval == 0 { + return "", "", fmt.Errorf("the samples ingestion rate (%v/h) is too high", rateInHours) + } + + // Make sure the expected chunk size is greater then the supported minimum. + if chunkInterval < minNumberOfEventsPerChunk/rateInHours { + return "", "", fmt.Errorf( + "the calculated chunk size is smaller than the minimum: samples ingestion rate = %v/h, calculated chunk interval = %v, minimum size = %v", + rateInHours, chunkInterval, minChunkSize) + } + + actualCapacityOfChunk := chunkInterval * rateInHours * maxSampleSize + numberOfChunksInPartition := 0 + + for (numberOfChunksInPartition+24)*actualCapacityOfChunk < maxPartitionSize { + numberOfChunksInPartition += 24 + } + if numberOfChunksInPartition == 0 { + return "", "", errors.Errorf("the samples ingestion rate (%v/h) is too high - cannot fit a partition in a day interval with the calculated chunk size (%v)", rateInHours, chunkInterval) + } + + partitionInterval := numberOfChunksInPartition * chunkInterval + return strconv.Itoa(chunkInterval) + "h", strconv.Itoa(partitionInterval) + "h", nil +} + +func rateToHours(samplesIngestionRate string) (int, error) { + parsingError := errors.New(`Invalid samples ingestion rate. The rate must be of the format "[0-9]+/[smh]". For example, "12/m"`) + + if len(samplesIngestionRate) < 3 { + return 0, parsingError + } + if samplesIngestionRate[len(samplesIngestionRate)-2] != '/' { + return 0, parsingError + } + + last := samplesIngestionRate[len(samplesIngestionRate)-1] + // Get the ingestion-rate samples number, ignoring the slash and time unit + samplesIngestionRate = samplesIngestionRate[:len(samplesIngestionRate)-2] + i, err := strconv.Atoi(samplesIngestionRate) + if err != nil { + return 0, errors.Wrap(err, parsingError.Error()) + } + if i <= 0 { + return 0, fmt.Errorf("invalid samples ingestion rate (%s). 
The rate cannot have a negative number of samples", samplesIngestionRate) + } + switch last { + case 's': + return i * 60 * 60, nil + case 'm': + return i * 60, nil + case 'h': + return i, nil + default: + return 0, parsingError + } +} + +func validateAggregatesGranularity(aggregationGranularity string, partitionInterval string, hasAggregates bool) error { + dayMillis := 24 * int64(time.Hour/time.Millisecond) + duration, err := utils.Str2duration(aggregationGranularity) + if err != nil { + return errors.Wrapf(err, "Failed to parse aggregation granularity '%s'.", aggregationGranularity) + } + + if dayMillis%duration != 0 && duration%dayMillis != 0 { + return errors.New("the aggregation granularity should be a divisor or a dividend of 1 day. Examples: \"10m\"; \"30m\"; \"2h\"") + } + + if hasAggregates { + partitionIntervalDuration, _ := utils.Str2duration(partitionInterval) // safe to ignore error since we create 'partitionInterval' + if partitionIntervalDuration/duration > MaxV3ioArraySize { + return errors.New("the size of the aggregation-granularity interval isn't sufficiently larger than the specified ingestion rate. Try increasing the granularity to get the expected pre-aggregation performance impact") + } + } + return nil +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/schema/schema_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/schema/schema_test.go new file mode 100644 index 00000000..9817fd6f --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/schema/schema_test.go @@ -0,0 +1,64 @@ +// +build unit + +package schema + +import ( + "fmt" + "testing" +) + +func TestRateToHour(t *testing.T) { + cases := []struct { + input string + output int + shouldFail bool + }{ + {input: "1/s", output: 3600}, + {input: "12/m", output: 12 * 60}, + {input: "2/h", output: 2}, + {input: "1m", shouldFail: true}, + {input: "1/t", shouldFail: true}, + {input: "-431/t", shouldFail: true}, + {input: "-1", shouldFail: true}, + {input: "", shouldFail: true}, + } + + for _, testCase := range cases { + t.Run(testCase.input, func(t *testing.T) { + actual, err := rateToHours(testCase.input) + if err != nil && !testCase.shouldFail { + t.Fatalf("got unexpected error %v", err) + } else if actual != testCase.output { + t.Fatalf("actual %v is not equal to expected %v", actual, testCase.output) + } + }) + } +} + +func TestAggregationGranularityValidation(t *testing.T) { + cases := []struct { + granularity string + partitionInterval string + hasAggregates bool + shouldFail bool + }{ + {granularity: "1h", partitionInterval: "48h", hasAggregates: true, shouldFail: false}, + {granularity: "15m", partitionInterval: "2880h", hasAggregates: true, shouldFail: false}, + {granularity: "1h", partitionInterval: "150000h", hasAggregates: true, shouldFail: true}, + {granularity: "1h", partitionInterval: "150000h", hasAggregates: false, shouldFail: false}, + {granularity: "30m", partitionInterval: "75000h", hasAggregates: true, shouldFail: true}, + } + + for _, testCase := range cases { + testName := fmt.Sprintf("%v - %v - %v", + testCase.granularity, testCase.partitionInterval, testCase.hasAggregates) + t.Run(testName, func(t *testing.T) { + err := validateAggregatesGranularity(testCase.granularity, testCase.partitionInterval, testCase.hasAggregates) + + if err != nil && !testCase.shouldFail || + err == nil && testCase.shouldFail { + t.Fatalf("test shouldFail=%v, and got error: %v", testCase.shouldFail, err) + } + }) + } +} diff --git 
a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdb_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdb_integration_test.go new file mode 100644 index 00000000..012478a1 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdb_integration_test.go @@ -0,0 +1,115 @@ +// +build integration + +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package tsdb_test + +import ( + "math" + "testing" + "time" + + "github.com/stretchr/testify/suite" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +type testTsdbSuite struct { + suite.Suite +} + +func (suite *testTsdbSuite) TestAppend() { + testCtx := suite.T() + testParams := tsdbtest.NewTestParams(testCtx) + defer tsdbtest.SetUp(testCtx, testParams)() + + adapter, err := tsdb.NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + suite.Require().NoError(err) + + appender, err := adapter.Appender() + suite.Require().NoError(err) + + querier, err := adapter.Querier(nil, 0, math.MaxInt64) + suite.Require().NoError(err) + + t1 := suite.parseTime("2018-11-01T00:00:00Z") + t2 := suite.parseTime("2018-11-03T00:00:00Z") + + _, err = appender.Add( + utils.Labels{utils.Label{Name: "__name__", Value: "AAPL"}, utils.Label{Name: "market", Value: "usa"}}, + t1, + -91.0) + suite.Require().NoError(err) + _, err = appender.Add( + utils.Labels{utils.Label{Name: "__name__", Value: "AAL"}, utils.Label{Name: "market", Value: "usa"}}, + t1, + -87.0) + suite.Require().NoError(err) + _, err = appender.Add( + utils.Labels{utils.Label{Name: "__name__", Value: "AAP"}, utils.Label{Name: "market", Value: "usa"}}, + t2, + -50.0) + suite.Require().NoError(err) + + _, err = appender.WaitForCompletion(0) + suite.Require().NoError(err) + + set, err := querier.Select("", "min", int64(time.Hour/time.Millisecond), "1==1") + suite.Require().NoError(err) + + // TODO: Replace map[tv]struct{} with []tv once TSDB-37 is fixed. This open issue causes duplicate results. 
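// Editorial note: because of the duplicate-results issue referenced in the TODO
// above, the samples of every returned series are folded into a set keyed by the
// (time, value) pair, which makes the comparison at the end of this test
// insensitive to both ordering and repetition. The expected keys also include an
// Aggregate="min" label, matching the "min" aggregation requested in the Select
// call above (apparently attached to each series when an aggregate is queried).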
+ var result = make(map[string]map[tv]struct{}) + for set.Next() { + suite.Require().Nil(set.Err()) + key := set.At().Labels().String() + var samples = make(map[tv]struct{}) + iter := set.At().Iterator() + for iter.Next() { + t, v := iter.At() + samples[tv{t: t, v: v}] = struct{}{} + } + result[key] = samples + } + + expected := map[string]map[tv]struct{}{ + `{__name__="AAPL", market="usa", Aggregate="min"}`: {tv{t: t1, v: -91}: struct{}{}}, + `{__name__="AAL", market="usa", Aggregate="min"}`: {tv{t: t1, v: -87}: struct{}{}}, + `{__name__="AAP", market="usa", Aggregate="min"}`: {tv{t: t2, v: -50}: struct{}{}}, + } + + suite.Require().Equal(expected, result) +} + +func (suite *testTsdbSuite) parseTime(timestamp string) int64 { + t, err := time.Parse(time.RFC3339, timestamp) + suite.Require().NoError(err) + return t.Unix() * 1000 +} + +type tv struct { + t int64 + v float64 +} + +func TestTsdbSuite(t *testing.T) { + suite.Run(t, new(testTsdbSuite)) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/config.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/config.go new file mode 100644 index 00000000..58b259c8 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/config.go @@ -0,0 +1,65 @@ +package tsdbtest + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/pkg/errors" + "github.com/v3io/v3io-tsdb/pkg/config" +) + +const TsdbDefaultTestConfigPath = "testdata" + +// nolint: deadcode,varcheck +const relativeProjectPath = "src/github.com/v3io/v3io-tsdb" + +/* +This method will try and load the configuration file from several locations by the following order: +1. Environment variable named 'V3IO_TSDB_CONFIG' +2. Current package's 'testdata/v3io-tsdb-config.yaml' folder +3. $GOPATH/src/github.com/v3io/v3io-tsdb/v3io-tsdb-config.yaml +*/ +func GetV3ioConfigPath() (string, error) { + if configurationPath := os.Getenv(config.V3ioConfigEnvironmentVariable); configurationPath != "" { + return configurationPath, nil + } + + localConfigFile := filepath.Join(TsdbDefaultTestConfigPath, config.DefaultConfigurationFileName) + if _, err := os.Stat(localConfigFile); !os.IsNotExist(err) { + return localConfigFile, nil + } + + // Look for a parent directory containing a makefile and the configuration file (presumed to be the project root). + dirPath := "./" + for { + _, err := os.Stat(dirPath + "Makefile") + if err == nil { + confFilePath := dirPath + config.DefaultConfigurationFileName + _, err = os.Stat(confFilePath) + if err == nil { + return confFilePath, nil + } + break // Bail out if we found the makefile but the config is not there. + } + absolute, err := filepath.Abs(dirPath) + if err != nil || absolute == "/" { // Bail out if we reached the root. 
+ break + } + dirPath += "../" + } + + return "", errors.Errorf("config file is not specified and could not be found") +} + +func LoadV3ioConfig() (*config.V3ioConfig, error) { + path, err := GetV3ioConfigPath() + if err != nil { + return nil, err + } + v3ioConfig, err := config.GetOrLoadFromFile(path) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("unable to load test configuration from '%s'", path)) + } + return v3ioConfig, nil +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/config_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/config_test.go new file mode 100644 index 00000000..5a8817bf --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/config_test.go @@ -0,0 +1,193 @@ +// +build unit + +package tsdbtest + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/v3io/v3io-tsdb/pkg/config" +) + +func createTestConfig(t *testing.T, path string) { + fullPath := filepath.Join(path, config.DefaultConfigurationFileName) + _, err := os.Create(fullPath) + if err != nil { + t.Fatalf("Failed to create file at %s. Error: %v", fullPath, err) + } + t.Logf("---> Created test configuration at: %s", fullPath) +} + +func deleteTestConfig(t *testing.T, path string) { + fullPath := filepath.Join(path, config.DefaultConfigurationFileName) + err := os.Remove(fullPath) + if err != nil && !os.IsNotExist(err) { + t.Errorf("Failed to remove file at %s. Error: %v", fullPath, err) + } + t.Logf("<--- Removed test configuration from: %s", fullPath) +} + +func TestGetV3ioConfigPath(t *testing.T) { + projectHome := "../../.." + testCases := []struct { + description string + expectedPath string + setup func() func() + }{ + {description: "get config from package testdata", + expectedPath: filepath.Join(TsdbDefaultTestConfigPath, config.DefaultConfigurationFileName), + setup: func() func() { + // Make this test agnostic to environment variables at runtime (store & recover on exit) + configPathEnv := os.Getenv(config.V3ioConfigEnvironmentVariable) + os.Unsetenv(config.V3ioConfigEnvironmentVariable) + + if _, err := os.Stat(filepath.Join(TsdbDefaultTestConfigPath, config.DefaultConfigurationFileName)); !os.IsNotExist(err) { + return func() { + os.Setenv(config.V3ioConfigEnvironmentVariable, configPathEnv) + } + } else { + path := TsdbDefaultTestConfigPath + if err := os.Mkdir(path, 0777); err != nil && !os.IsExist(err) { + t.Fatalf("Failed to mkdir %v", err) + } + createTestConfig(t, path) + return func() { + os.Setenv(config.V3ioConfigEnvironmentVariable, configPathEnv) + deleteTestConfig(t, path) + os.RemoveAll(path) + } + } + }}, + + {description: "get config from project root", + expectedPath: "./../../../v3io-tsdb-config.yaml", + setup: func() func() { + // Make this test agnostic to environment variables at runtime (store & recover on exit) + configPathEnv := os.Getenv(config.V3ioConfigEnvironmentVariable) + os.Unsetenv(config.V3ioConfigEnvironmentVariable) + + if _, err := os.Stat(filepath.Join(projectHome, config.DefaultConfigurationFileName)); !os.IsNotExist(err) { + return func() { + os.Setenv(config.V3ioConfigEnvironmentVariable, configPathEnv) + } + } else { + path := projectHome + createTestConfig(t, path) + return func() { + os.Setenv(config.V3ioConfigEnvironmentVariable, configPathEnv) + deleteTestConfig(t, path) + os.Remove(path) + } + } + }}, + + {description: "get config from env var", + expectedPath: getConfigPathFromEnvOrDefault(), + setup: 
func() func() { + env := os.Getenv(config.V3ioConfigEnvironmentVariable) + if env == "" { + os.Setenv(config.V3ioConfigEnvironmentVariable, config.DefaultConfigurationFileName) + return func() { + os.Unsetenv(config.V3ioConfigEnvironmentVariable) + } + } + return func() {} + }}, + } + + for _, test := range testCases { + t.Run(test.description, func(t *testing.T) { + testGetV3ioConfigPathCase(t, test.expectedPath, test.setup) + }) + } +} + +func getConfigPathFromEnvOrDefault() string { + configPath := os.Getenv(config.V3ioConfigEnvironmentVariable) + if configPath == "" { + configPath = config.DefaultConfigurationFileName + } + return configPath +} + +func testGetV3ioConfigPathCase(t *testing.T, expected string, setup func() func()) { + defer setup()() + path, err := GetV3ioConfigPath() + if err != nil { + t.Fatal("Failed to get configuration path", err) + } + assert.Equal(t, expected, path) +} + +func TestMergeConfig(t *testing.T) { + defaultCfg, err := config.GetOrDefaultConfig() + if err != nil { + t.Fatal("Failed to get default configuration", err) + } + + updateWithCfg := config.V3ioConfig{ + BatchSize: 128, + TablePath: "test-new-table", + MetricsReporter: config.MetricsReporterConfig{ + ReportOnShutdown: true, + RepotInterval: 120, + }, + } + + mergedCfg, err := defaultCfg.Merge(&updateWithCfg) + if err != nil { + t.Fatal("Failed to update default configuration", err) + } + + // Validate result structure + assert.Equal(t, mergedCfg.BatchSize, 128) + assert.Equal(t, mergedCfg.TablePath, "test-new-table") + assert.Equal(t, mergedCfg.MetricsReporter.ReportOnShutdown, true) + assert.Equal(t, mergedCfg.MetricsReporter.RepotInterval, 120) + + // Make sure that default configuration remains unchanged + snapshot, err := config.GetOrDefaultConfig() + if err != nil { + t.Fatal("Failed to get default configuration", err) + } + + assert.Equal(t, snapshot.BatchSize, defaultCfg.BatchSize) + assert.Equal(t, snapshot.TablePath, defaultCfg.TablePath) + assert.Equal(t, snapshot.MetricsReporter.ReportOnShutdown, defaultCfg.MetricsReporter.ReportOnShutdown) + assert.Equal(t, snapshot.MetricsReporter.RepotInterval, defaultCfg.MetricsReporter.RepotInterval) + + assert.NotNil(t, defaultCfg.BuildInfo) + assert.NotNil(t, defaultCfg.BuildInfo.String()) +} + +func TestWithDefaults(t *testing.T) { + myCfg := &config.V3ioConfig{ + BatchSize: 1024, + TablePath: "test-my-table", + MetricsReporter: config.MetricsReporterConfig{ + ReportOnShutdown: true, + RepotInterval: 180, + ReportPeriodically: true, + }, + } + + updatedCfg := config.WithDefaults(myCfg) + + // Make sure it didn't override anything + assert.Equal(t, updatedCfg.BatchSize, myCfg.BatchSize) + assert.Equal(t, updatedCfg.TablePath, myCfg.TablePath) + assert.Equal(t, updatedCfg.MetricsReporter.ReportPeriodically, myCfg.MetricsReporter.ReportPeriodically) + assert.Equal(t, updatedCfg.MetricsReporter.RepotInterval, myCfg.MetricsReporter.RepotInterval) + assert.Equal(t, updatedCfg.MetricsReporter.ReportOnShutdown, myCfg.MetricsReporter.ReportOnShutdown) + + // and default value is set for ShardingBucketsCount + assert.Equal(t, updatedCfg.ShardingBucketsCount, config.DefaultShardingBucketsCount) + + // WithDefaults method does not create new configuration struct, therefore result object has the same address as myCfg + assert.Equal(t, myCfg, updatedCfg) + + assert.NotNil(t, updatedCfg.BuildInfo) + assert.NotNil(t, updatedCfg.BuildInfo.String()) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/testutils/schema.go 
b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/testutils/schema.go new file mode 100644 index 00000000..bae13b4b --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/testutils/schema.go @@ -0,0 +1,21 @@ +package testutils + +import ( + "testing" + + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/tsdb/schema" +) + +func CreateSchema(t testing.TB, aggregates string) *config.Schema { + v3ioCfg, err := config.GetOrDefaultConfig() + if err != nil { + t.Fatalf("Failed to obtain a TSDB configuration. Error: %v", err) + } + + schm, err := schema.NewSchema(v3ioCfg, "1/s", "1h", aggregates, "") + if err != nil { + t.Fatalf("Failed to create a TSDB schema. Error: %v", err) + } + return schm +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go new file mode 100644 index 00000000..879b8cf7 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go @@ -0,0 +1,406 @@ +package tsdbtest + +import ( + json2 "encoding/json" + "fmt" + "os" + "path" + "regexp" + "strings" + "testing" + "time" + + "github.com/v3io/v3io-tsdb/internal/pkg/performance" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + // nolint: golint + . "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/testutils" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +const MinuteInMillis = 60 * 1000 +const HoursInMillis = 60 * MinuteInMillis +const DaysInMillis = 24 * HoursInMillis + +type DataPoint struct { + Time int64 + Value interface{} +} + +func (dp DataPoint) Equals(other DataPoint) bool { + if &dp.Time != &other.Time { + return true + } + if dp.Time != other.Time { + return false + } + + switch dpVal := dp.Value.(type) { + case float64: + switch oVal := other.Value.(type) { + case float64: + return dpVal == oVal + case int: + return dpVal == float64(oVal) + default: + return false + } + case int: + switch oVal := other.Value.(type) { + case float64: + return float64(dpVal) == oVal + case int: + return dpVal == oVal + default: + return false + } + case string: + switch oVal := other.Value.(type) { + case string: + return oVal == dpVal + case float64: + soVal := fmt.Sprintf("%f", oVal) + return dpVal == soVal + case int: + soVal := fmt.Sprintf("%d", oVal) + return dpVal == soVal + default: + return false + } + default: + return false + } +} + +type Metric struct { + Name string + Labels utils.Labels + Data []DataPoint + ExpectedCount *int +} +type TimeSeries []Metric + +const OptDropTableOnTearDown = "DropTableOnTearDown" +const OptIgnoreReason = "IgnoreReason" +const OptTimeSeries = "TimeSeries" +const OptV3ioConfig = "V3ioConfig" + +type TestParams map[string]interface{} +type TestOption struct { + Key string + Value interface{} +} + +func NewTestParams(t testing.TB, opts ...TestOption) TestParams { + initialSize := len(opts) + testOpts := make(TestParams, initialSize) + + // Initialize defaults + testOpts[OptDropTableOnTearDown] = true + testOpts[OptIgnoreReason] = "" + testOpts[OptTimeSeries] = TimeSeries{} + + defaultV3ioConfig, err := LoadV3ioConfig() + if err != nil { + t.Fatalf("Unable to get V3IO configuration.\nError: %v", err) + } + + //defaultV3ioConfig.TablePath = PrefixTablePath(t.Name()) + testOpts[OptV3ioConfig] = defaultV3ioConfig + + for _, opt := range opts { + testOpts[opt.Key] 
= opt.Value + } + + return testOpts +} + +func (tp TestParams) TimeSeries() TimeSeries { + return tp[OptTimeSeries].(TimeSeries) +} +func (tp TestParams) DropTableOnTearDown() bool { + return tp[OptDropTableOnTearDown].(bool) +} +func (tp TestParams) IgnoreReason() string { + return tp[OptIgnoreReason].(string) +} +func (tp TestParams) V3ioConfig() *config.V3ioConfig { + return tp[OptV3ioConfig].(*config.V3ioConfig) +} + +// DataPointTimeSorter sorts DataPoints by time +type DataPointTimeSorter []DataPoint + +func (a DataPointTimeSorter) Len() int { return len(a) } +func (a DataPointTimeSorter) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a DataPointTimeSorter) Less(i, j int) bool { return a[i].Time < a[j].Time } + +type Sample struct { + Lset utils.Labels + Time string + Value float64 +} + +func DeleteTSDB(t testing.TB, v3ioConfig *config.V3ioConfig) { + adapter, err := NewV3ioAdapter(v3ioConfig, nil, nil) + if err != nil { + t.Fatalf("Failed to create an adapter. Reason: %s", err) + } + + if err := adapter.DeleteDB(DeleteParams{DeleteAll: true, IgnoreErrors: true}); err != nil { + t.Fatalf("Failed to delete a TSDB instance (table) on teardown. Reason: %s", err) + } +} + +func CreateTestTSDB(t testing.TB, v3ioConfig *config.V3ioConfig) { + CreateTestTSDBWithAggregates(t, v3ioConfig, "*") +} + +func CreateTestTSDBWithAggregates(t testing.TB, v3ioConfig *config.V3ioConfig, aggregates string) { + schema := testutils.CreateSchema(t, aggregates) + if err := CreateTSDB(v3ioConfig, schema, nil); err != nil { + v3ioConfigAsJSON, _ := json2.MarshalIndent(v3ioConfig, "", " ") + t.Fatalf("Failed to create a TSDB instance (table). Reason: %v\nConfiguration:\n%s", err, string(v3ioConfigAsJSON)) + } +} + +func tearDown(t testing.TB, v3ioConfig *config.V3ioConfig, testParams TestParams) { + // Don't delete the TSDB table if the test failed or test expects that + if !t.Failed() && testParams.DropTableOnTearDown() { + DeleteTSDB(t, v3ioConfig) + } +} + +func SetUp(t testing.TB, testParams TestParams) func() { + v3ioConfig := testParams.V3ioConfig() + + if overrideTableName, ok := testParams["override_test_name"]; ok { + v3ioConfig.TablePath = PrefixTablePath(fmt.Sprintf("%v", overrideTableName)) + } else { + v3ioConfig.TablePath = PrefixTablePath(fmt.Sprintf("%s-%d", t.Name(), time.Now().Nanosecond())) + } + + CreateTestTSDB(t, v3ioConfig) + + // Measure performance + metricReporter, err := performance.DefaultReporterInstance() + if err != nil { + t.Fatalf("Unable to initialize the performance metrics reporter. Reason: %v", err) + } + // nolint: errcheck + metricReporter.Start() + + return func() { + // nolint: errcheck + defer metricReporter.Stop() + tearDown(t, v3ioConfig, testParams) + } +} + +func SetUpWithData(t *testing.T, testOpts TestParams) (*V3ioAdapter, func()) { + teardown := SetUp(t, testOpts) + adapter := InsertData(t, testOpts) + return adapter, teardown +} + +func SetUpWithDBConfig(t *testing.T, schema *config.Schema, testParams TestParams) func() { + v3ioConfig := testParams.V3ioConfig() + v3ioConfig.TablePath = PrefixTablePath(fmt.Sprintf("%s-%d", t.Name(), time.Now().Nanosecond())) + if err := CreateTSDB(v3ioConfig, schema, nil); err != nil { + v3ioConfigAsJSON, _ := json2.MarshalIndent(v3ioConfig, "", " ") + t.Fatalf("Failed to create a TSDB instance (table). 
Reason: %s\nConfiguration:\n%s", err, string(v3ioConfigAsJSON)) + } + + // Measure performance + metricReporter, err := performance.DefaultReporterInstance() + if err != nil { + t.Fatalf("Unable to initialize the performance metrics reporter. Error: %v", err) + } + // nolint: errcheck + metricReporter.Start() + + return func() { + // nolint: errcheck + defer metricReporter.Stop() + tearDown(t, v3ioConfig, testParams) + } +} + +func InsertData(t *testing.T, testParams TestParams) *V3ioAdapter { + adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + if err != nil { + t.Fatalf("Failed to create a V3IO TSDB adapter. Reason: %s", err) + } + + appender, err := adapter.Appender() + if err != nil { + t.Fatalf("Failed to get an appender. Reason: %s", err) + } + + timeSeries := testParams.TimeSeries() + + for _, metric := range timeSeries { + + labels := utils.Labels{utils.Label{Name: "__name__", Value: metric.Name}} + labels = append(labels, metric.Labels...) + + ref, err := appender.Add(labels, metric.Data[0].Time, metric.Data[0].Value) + if err != nil { + t.Fatalf("Failed to add data to the TSDB appender. Reason: %s", err) + } + for _, curr := range metric.Data[1:] { + err := appender.AddFast(labels, ref, curr.Time, curr.Value) + if err != nil { + t.Fatalf("Failed to AddFast. Reason: %s", err) + } + } + + if _, err := appender.WaitForCompletion(0); err != nil { + t.Fatalf("Failed to wait for TSDB append completion. Reason: %s", err) + } + } + + return adapter +} + +func ValidateCountOfSamples(t testing.TB, adapter *V3ioAdapter, metricName string, expected int, startTimeMs, endTimeMs int64, queryAggStep int64) { + + var stepSize int64 + if queryAggStep <= 0 { + var err error + stepSize, err = utils.Str2duration("1h") + if err != nil { + t.Fatal(err, "Failed to create an aggregation interval (step).") + } + } else { + stepSize = queryAggStep + } + + qry, err := adapter.QuerierV2() + if err != nil { + t.Fatal(err, "Failed to create a Querier instance.") + } + + selectParams := &pquerier.SelectParams{From: startTimeMs - stepSize, + To: endTimeMs, + Functions: "count", + Step: stepSize, + Filter: fmt.Sprintf("starts(__name__, '%v')", metricName)} + set, _ := qry.Select(selectParams) + + var actualCount int + for set.Next() { + if set.Err() != nil { + t.Fatal(set.Err(), "Failed to get the next element from the result set.") + } + + series := set.At() + iter := series.Iterator() + for iter.Next() { + if iter.Err() != nil { + t.Fatal(set.Err(), "Failed to get the next time-value pair from the iterator.") + } + _, v := iter.At() + actualCount += int(v) + } + } + + if set.Err() != nil { + t.Fatal(set.Err()) + } + + if expected != actualCount { + t.Fatalf("Check failed: the metric samples' actual count isn't as expected [%d(actualCount) != %d(expected)].", actualCount, expected) + } + + t.Logf("PASS: the metric-samples actual count matches the expected total count [%d(actualCount) == %d(expected)].", actualCount, expected) +} + +func ValidateRawData(t testing.TB, adapter *V3ioAdapter, metricName string, startTimeMs, endTimeMs int64, isValid func(*DataPoint, *DataPoint) bool) { + + qry, err := adapter.Querier(nil, startTimeMs, endTimeMs) + if err != nil { + t.Fatal(err, "Failed to create a Querier instance.") + } + + set, _ := qry.Select(metricName, "", 0, "") + + for set.Next() { + // Start over for each label set + var lastDataPoint *DataPoint + + if set.Err() != nil { + t.Fatal(set.Err(), "Failed to get the next element from a result set.") + } + + series := set.At() + iter := 
series.Iterator() + for iter.Next() { + if iter.Err() != nil { + t.Fatal(set.Err(), "Failed to get the next time-value pair from an iterator.") + } + currentTime, currentValue := iter.At() + currentDataPoint := &DataPoint{Time: currentTime, Value: currentValue} + + if lastDataPoint != nil { + switch dataType := lastDataPoint.Value.(type) { + case string, float64, int, int64: + // Note: We cast float to integer to eliminate the risk of a precision error + if !isValid(lastDataPoint, currentDataPoint) { + t.Fatalf("The raw-data consistency check failed: metric name='%s'\n\tisValid(%v, %v) == false", + metricName, lastDataPoint, currentDataPoint) + } + default: + t.Fatalf("Got value of unsupported data type: %T", dataType) + } + } + lastDataPoint = currentDataPoint + } + } + + if set.Err() != nil { + t.Fatal(set.Err()) + } +} + +func NormalizePath(path string) string { + chars := []string{":", "+"} + r := strings.Join(chars, "") + re := regexp.MustCompile("[" + r + "]+") + return re.ReplaceAllString(path, "_") +} + +func PrefixTablePath(tablePath string) string { + base := os.Getenv("TSDB_TEST_TABLE_PATH") + if base == "" { + return tablePath + } + return path.Join(os.Getenv("TSDB_TEST_TABLE_PATH"), tablePath) +} + +func IteratorToSlice(it chunkenc.Iterator) ([]DataPoint, error) { + var result []DataPoint + for it.Next() { + t, v := it.At() + if it.Err() != nil { + return nil, it.Err() + } + result = append(result, DataPoint{Time: t, Value: v}) + } + return result, nil +} + +func NanosToMillis(nanos int64) int64 { + millis := nanos / int64(time.Millisecond) + return millis +} + +func DateStringToMillis(date string) (int64, error) { + t, err := time.Parse(time.RFC3339, date) + if err != nil { + return 0, err + } + return t.Unix() * 1000, nil +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go new file mode 100644 index 00000000..ed5a9ee3 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go @@ -0,0 +1,845 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. 
+*/ + +package tsdb + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "math" + pathUtil "path" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/nuclio/logger" + "github.com/pkg/errors" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-go/pkg/dataplane/http" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/appender" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/partmgr" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + "github.com/v3io/v3io-tsdb/pkg/querier" + "github.com/v3io/v3io-tsdb/pkg/tsdb/schema" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +const ( + defaultHTTPTimeout = 30 * time.Second + + errorCodeString = "ErrorCode" + falseConditionOuterErrorCode = "184549378" // todo: change codes + falseConditionInnerErrorCode = "385876025" + maxExpressionsInUpdateItem = 1500 // max is 2000, we're taking a buffer since it doesn't work with 2000 +) + +type V3ioAdapter struct { + startTimeMargin int64 + logger logger.Logger + container v3io.Container + HTTPTimeout time.Duration + MetricsCache *appender.MetricsCache + cfg *config.V3ioConfig + partitionMngr *partmgr.PartitionManager +} + +type DeleteParams struct { + Metrics []string + Filter string + From, To int64 + DeleteAll bool + + IgnoreErrors bool +} + +func CreateTSDB(cfg *config.V3ioConfig, schema *config.Schema, container v3io.Container) error { + + lgr, _ := utils.NewLogger(cfg.LogLevel) + httpTimeout := parseHTTPTimeout(cfg, lgr) + var err error + if container == nil { + container, err = utils.CreateContainer(lgr, cfg, httpTimeout) + if err != nil { + return errors.Wrap(err, "Failed to create a data container.") + } + } + data, err := json.Marshal(schema) + if err != nil { + return errors.Wrap(err, "Failed to marshal the TSDB schema file.") + } + + dataPlaneInput := v3io.DataPlaneInput{Timeout: httpTimeout} + + path := pathUtil.Join(cfg.TablePath, config.SchemaConfigFileName) + // Check whether the config file already exists, and abort if it does + _, err = container.GetObjectSync(&v3io.GetObjectInput{Path: path, DataPlaneInput: dataPlaneInput}) + if err == nil { + return fmt.Errorf("A TSDB table already exists at path '" + cfg.TablePath + "'.") + } + + err = container.PutObjectSync(&v3io.PutObjectInput{Path: path, Body: data, DataPlaneInput: dataPlaneInput}) + if err != nil { + return errors.Wrapf(err, "Failed to create a TSDB schema at path '%s/%s/%s'.", cfg.WebAPIEndpoint, cfg.Container, path) + } + return err +} + +func parseHTTPTimeout(cfg *config.V3ioConfig, logger logger.Logger) time.Duration { + if cfg.HTTPTimeout == "" { + return defaultHTTPTimeout + } + timeout, err := time.ParseDuration(cfg.HTTPTimeout) + if err != nil { + logger.Warn("Failed to parse httpTimeout '%s'. Defaulting to %d millis.", cfg.HTTPTimeout, defaultHTTPTimeout/time.Millisecond) + return defaultHTTPTimeout + } + return timeout +} + +// Create a new TSDB adapter, similar to Prometheus TSDB adapter but with a few +// extensions. The Prometheus compliant adapter is found under /promtsdb. 
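// A minimal usage sketch (editorial addition, not part of the upstream file),
// assuming an already loaded *config.V3ioConfig named cfg; variable names such as
// cfg and nowMs are placeholders for illustration only:
//
//	adapter, err := NewV3ioAdapter(cfg, nil, nil) // nil container/logger are created from cfg
//	if err != nil {
//		return err
//	}
//	appender, err := adapter.Appender()
//	if err != nil {
//		return err
//	}
//	lset := utils.Labels{{Name: "__name__", Value: "cpu"}}
//	ref, err := appender.Add(lset, nowMs, 1.5)          // first sample creates the series
//	err = appender.AddFast(lset, ref, nowMs+1000, 1.6)  // later samples can reuse the ref
//	_, err = appender.WaitForCompletion(0)              // flush before querying
//	querier, err := adapter.QuerierV2()                 // pquerier-based query interface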
+func NewV3ioAdapter(cfg *config.V3ioConfig, container v3io.Container, logger logger.Logger) (*V3ioAdapter, error) { + + var err error + newV3ioAdapter := V3ioAdapter{} + newV3ioAdapter.cfg = cfg + if logger != nil { + newV3ioAdapter.logger = logger + } else { + newV3ioAdapter.logger, err = utils.NewLogger(cfg.LogLevel) + if err != nil { + return nil, err + } + } + + newV3ioAdapter.HTTPTimeout = parseHTTPTimeout(cfg, logger) + + if container != nil { + newV3ioAdapter.container = container + } else { + newV3ioAdapter.container, err = utils.CreateContainer(newV3ioAdapter.logger, cfg, newV3ioAdapter.HTTPTimeout) + if err != nil { + return nil, errors.Wrap(err, "Failed to create V3IO data container") + } + } + + err = newV3ioAdapter.connect() + + return &newV3ioAdapter, err +} + +func NewContainer(v3ioURL string, numWorkers int, accessKey string, username string, password string, containerName string, logger logger.Logger) (v3io.Container, error) { + newContextInput := &v3iohttp.NewContextInput{ + NumWorkers: numWorkers, + } + ctx, err := v3iohttp.NewContext(logger, newContextInput) + if err != nil { + return nil, err + } + + session, err := ctx.NewSession(&v3io.NewSessionInput{URL: v3ioURL, Username: username, Password: password, AccessKey: accessKey}) + if err != nil { + return nil, errors.Wrap(err, "Failed to create session.") + } + + container, err := session.NewContainer(&v3io.NewContainerInput{ContainerName: containerName}) + if err != nil { + return nil, err + } + return container, nil +} + +func (a *V3ioAdapter) GetSchema() *config.Schema { + return a.partitionMngr.GetConfig() +} + +func (a *V3ioAdapter) GetLogger(child string) logger.Logger { + return a.logger.GetChild(child) +} + +func (a *V3ioAdapter) GetContainer() (v3io.Container, string) { + return a.container, a.cfg.TablePath +} + +func (a *V3ioAdapter) connect() error { + + fullpath := fmt.Sprintf("%s/%s/%s", a.cfg.WebAPIEndpoint, a.cfg.Container, a.cfg.TablePath) + resp, err := a.container.GetObjectSync(&v3io.GetObjectInput{Path: pathUtil.Join(a.cfg.TablePath, config.SchemaConfigFileName)}) + if err != nil { + if utils.IsNotExistsError(err) { + return errors.Errorf("No TSDB schema file found at '%s'.", fullpath) + } + return errors.Wrapf(err, "Failed to read a TSDB schema from '%s'.", fullpath) + } + + tableSchema := config.Schema{} + err = json.Unmarshal(resp.Body(), &tableSchema) + if err != nil { + return errors.Wrapf(err, "Failed to unmarshal the TSDB schema at '%s', got: %v .", fullpath, string(resp.Body())) + } + + // in order to support backward compatibility we do not fail on version mismatch and only logging warning + if a.cfg.LoadPartitionsFromSchemaAttr && tableSchema.TableSchemaInfo.Version != schema.Version { + a.logger.Warn("Table Schema version mismatch - existing table schema version is %d while the tsdb library version is %d! 
Make sure to create the table with same library version", + tableSchema.TableSchemaInfo.Version, schema.Version) + } + + a.partitionMngr, err = partmgr.NewPartitionMngr(&tableSchema, a.container, a.cfg) + if err != nil { + return errors.Wrapf(err, "Failed to create a TSDB partition manager at '%s'.", fullpath) + } + err = a.partitionMngr.Init() + if err != nil { + return errors.Wrapf(err, "Failed to initialize the TSDB partition manager at: %s", fullpath) + } + + a.logger.Debug("Running with the following TSDB configuration: %+v\n", a.cfg) + + return nil +} + +func (a *V3ioAdapter) InitAppenderCache() error { + if a.MetricsCache == nil { + a.MetricsCache = appender.NewMetricsCache(a.container, a.logger, a.cfg, a.partitionMngr) + return a.MetricsCache.Start() + } + + return nil +} + +// Create an appender interface, for writing performance +func (a *V3ioAdapter) Appender() (Appender, error) { + err := a.InitAppenderCache() + if err != nil { + return nil, err + } + + newAppender := v3ioAppender{metricsCache: a.MetricsCache} + return newAppender, nil +} + +func (a *V3ioAdapter) StartTime() (int64, error) { + startTime := time.Now().Unix() * 1000 + return startTime - 1000*3600*24*1000, nil // TODO: from config or DB w default +} + +func (a *V3ioAdapter) Close() error { + return nil +} + +// Create a Querier interface, used for time-series queries +func (a *V3ioAdapter) Querier(_ context.Context, mint, maxt int64) (*querier.V3ioQuerier, error) { + if maxt < mint { + return nil, errors.Errorf("End time '%d' is lower than start time '%d'.", maxt, mint) + } + return querier.NewV3ioQuerier(a.container, a.logger, mint, maxt, a.cfg, a.partitionMngr), nil +} + +// Create a Querier interface, used for time-series queries +func (a *V3ioAdapter) QuerierV2() (*pquerier.V3ioQuerier, error) { + return pquerier.NewV3ioQuerier(a.container, a.logger, a.cfg, a.partitionMngr), nil +} + +// Delete by time range can optionally specify metrics and filter by labels +func (a *V3ioAdapter) DeleteDB(deleteParams DeleteParams) error { + if deleteParams.DeleteAll { + // Ignore time boundaries + deleteParams.From = 0 + deleteParams.To = math.MaxInt64 + } else { + if deleteParams.To == 0 { + deleteParams.To = time.Now().Unix() * 1000 + } + } + + // Delete Data + err := a.DeletePartitionsData(&deleteParams) + if err != nil { + return err + } + + // If no data is left, delete Names folder + if len(a.partitionMngr.GetPartitionsPaths()) == 0 { + path := filepath.Join(a.cfg.TablePath, config.NamesDirectory) + "/" // Need a trailing slash + a.logger.Info("Delete metric names at path '%s'.", path) + err := utils.DeleteTable(a.logger, a.container, path, "", a.cfg.QryWorkers) + if err != nil && !deleteParams.IgnoreErrors { + return errors.Wrap(err, "Failed to delete the metric-names table.") + } + // Delete the Directory object + err = a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: path}) + if err != nil && !deleteParams.IgnoreErrors { + if !utils.IsNotExistsError(err) { + return errors.Wrapf(err, "Failed to delete table object '%s'.", path) + } + } + } + + // If need to 'deleteAll', delete schema + TSDB table folder + if deleteParams.DeleteAll { + // Delete Schema file + schemaPath := pathUtil.Join(a.cfg.TablePath, config.SchemaConfigFileName) + a.logger.Info("Delete the TSDB configuration at '%s'.", schemaPath) + err := a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: schemaPath}) + if err != nil && !deleteParams.IgnoreErrors { + return errors.New("The configuration at '" + schemaPath + "' cannot be deleted 
or doesn't exist.") + } + + // Delete the Directory object + path := a.cfg.TablePath + "/" + err = a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: path}) + if err != nil && !deleteParams.IgnoreErrors { + if !utils.IsNotExistsError(err) { + return errors.Wrapf(err, "Failed to delete table object '%s'.", path) + } + } + } + + return nil +} + +func (a *V3ioAdapter) DeletePartitionsData(deleteParams *DeleteParams) error { + partitions := a.partitionMngr.PartsForRange(deleteParams.From, deleteParams.To, true) + var entirelyDeletedPartitions []*partmgr.DBPartition + + deleteWholePartition := deleteParams.DeleteAll || (deleteParams.Filter == "" && len(deleteParams.Metrics) == 0) + + fileToDeleteChan := make(chan v3io.Item, 1024) + getItemsTerminationChan := make(chan error, len(partitions)) + deleteTerminationChan := make(chan error, a.cfg.Workers) + numOfGetItemsRoutines := len(partitions) + if len(deleteParams.Metrics) > 0 { + numOfGetItemsRoutines = numOfGetItemsRoutines * len(deleteParams.Metrics) + } + goRoutinesNum := numOfGetItemsRoutines + a.cfg.Workers + onErrorTerminationChannel := make(chan struct{}, goRoutinesNum) + systemAttributesToFetch := []string{config.ObjectNameAttrName, config.MtimeSecsAttributeName, config.MtimeNSecsAttributeName, config.EncodingAttrName, config.MaxTimeAttrName} + var getItemsWorkers, getItemsTerminated, deletesTerminated int + + var getItemsWG sync.WaitGroup + getItemsErrorChan := make(chan error, numOfGetItemsRoutines) + + aggregates := a.GetSchema().PartitionSchemaInfo.Aggregates + hasServerSideAggregations := len(aggregates) != 1 || aggregates[0] != "" + + var aggrMask aggregate.AggrType + var err error + if hasServerSideAggregations { + aggrMask, _, err = aggregate.AggregatesFromStringListWithCount(aggregates) + if err != nil { + return err + } + } + + for i := 0; i < a.cfg.Workers; i++ { + go deleteObjectWorker(a.container, deleteParams, a.logger, + fileToDeleteChan, deleteTerminationChan, onErrorTerminationChannel, + aggrMask) + } + + for _, part := range partitions { + partitionEntirelyInRange := deleteParams.From <= part.GetStartTime() && deleteParams.To >= part.GetEndTime() + deleteEntirePartitionFolder := partitionEntirelyInRange && deleteWholePartition + + // Delete all files in partition folder and then delete the folder itself + if deleteEntirePartitionFolder { + a.logger.Info("Deleting entire partition '%s'.", part.GetTablePath()) + + getItemsWG.Add(1) + go deleteEntirePartition(a.logger, a.container, part.GetTablePath(), a.cfg.QryWorkers, + &getItemsWG, getItemsErrorChan) + + entirelyDeletedPartitions = append(entirelyDeletedPartitions, part) + // First get all items based on filter+metric+time range then delete what is necessary + } else { + a.logger.Info("Deleting partial partition '%s'.", part.GetTablePath()) + + start, end := deleteParams.From, deleteParams.To + + // Round the start and end times to the nearest aggregation buckets - to later on recalculate server side aggregations + if hasServerSideAggregations { + start = part.GetAggregationBucketStartTime(part.Time2Bucket(deleteParams.From)) + end = part.GetAggregationBucketEndTime(part.Time2Bucket(deleteParams.To)) + } + + var chunkAttributesToFetch []string + + // If we don't want to delete the entire object, fetch also the desired chunks to delete. + if !partitionEntirelyInRange { + chunkAttributesToFetch, _ = part.Range2Attrs("v", start, end) + } + + allAttributes := append(chunkAttributesToFetch, systemAttributesToFetch...) 
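+				// Dispatch getItems workers for this partition: a single worker filtered over the
+				// whole partition when no specific metrics were requested, otherwise one worker
+				// per metric and sharding key.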
+ if len(deleteParams.Metrics) == 0 { + getItemsWorkers++ + input := &v3io.GetItemsInput{Path: part.GetTablePath(), + AttributeNames: allAttributes, + Filter: deleteParams.Filter} + go getItemsWorker(a.logger, a.container, input, part, fileToDeleteChan, getItemsTerminationChan, onErrorTerminationChannel) + } else { + for _, metric := range deleteParams.Metrics { + for _, shardingKey := range part.GetShardingKeys(metric) { + getItemsWorkers++ + input := &v3io.GetItemsInput{Path: part.GetTablePath(), + AttributeNames: allAttributes, + Filter: deleteParams.Filter, + ShardingKey: shardingKey} + go getItemsWorker(a.logger, a.container, input, part, fileToDeleteChan, getItemsTerminationChan, onErrorTerminationChannel) + } + } + } + } + } + a.logger.Debug("issued %v getItems", getItemsWorkers) + + // Waiting fot deleting of full partitions + getItemsWG.Wait() + select { + case err = <-getItemsErrorChan: + // Signal all other goroutines to quite + for i := 0; i < goRoutinesNum; i++ { + onErrorTerminationChannel <- struct{}{} + } + return err + default: + } + + if getItemsWorkers != 0 { + for deletesTerminated < a.cfg.Workers { + select { + case err := <-getItemsTerminationChan: + a.logger.Debug("finished getItems worker, total finished: %v, error: %v", getItemsTerminated+1, err) + if err != nil { + // If requested to ignore non-existing tables do not return error. + if !(deleteParams.IgnoreErrors && utils.IsNotExistsOrConflictError(err)) { + for i := 0; i < goRoutinesNum; i++ { + onErrorTerminationChannel <- struct{}{} + } + return errors.Wrapf(err, "GetItems failed during recursive delete.") + } + } + getItemsTerminated++ + + if getItemsTerminated == getItemsWorkers { + close(fileToDeleteChan) + } + case err := <-deleteTerminationChan: + a.logger.Debug("finished delete worker, total finished: %v, err: %v", deletesTerminated+1, err) + if err != nil { + for i := 0; i < goRoutinesNum; i++ { + onErrorTerminationChannel <- struct{}{} + } + return errors.Wrapf(err, "Delete failed during recursive delete.") + } + deletesTerminated++ + } + } + } else { + close(fileToDeleteChan) + } + + a.logger.Debug("finished deleting data, removing partitions from schema") + err = a.partitionMngr.DeletePartitionsFromSchema(entirelyDeletedPartitions) + if err != nil { + return err + } + + return nil +} + +func deleteEntirePartition(logger logger.Logger, container v3io.Container, partitionPath string, workers int, + wg *sync.WaitGroup, errChannel chan<- error) { + defer wg.Done() + + err := utils.DeleteTable(logger, container, partitionPath, "", workers) + if err != nil { + errChannel <- errors.Wrapf(err, "Failed to delete partition '%s'.", partitionPath) + return + } + // Delete the Directory object + err = container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: partitionPath}) + if err != nil && !utils.IsNotExistsError(err) { + errChannel <- errors.Wrapf(err, "Failed to delete partition folder '%s'.", partitionPath) + } +} + +func getItemsWorker(logger logger.Logger, container v3io.Container, input *v3io.GetItemsInput, partition *partmgr.DBPartition, + filesToDeleteChan chan<- v3io.Item, terminationChan chan<- error, onErrorTerminationChannel <-chan struct{}) { + for { + select { + case _ = <-onErrorTerminationChannel: + terminationChan <- nil + return + default: + } + + logger.Debug("going to getItems for partition '%v', input: %v", partition.GetTablePath(), *input) + resp, err := container.GetItemsSync(input) + if err != nil { + terminationChan <- err + return + } + resp.Release() + output := 
resp.Output.(*v3io.GetItemsOutput) + + for _, item := range output.Items { + item["partition"] = partition + + // In case we got error on delete while iterating getItems response + select { + case _ = <-onErrorTerminationChannel: + terminationChan <- nil + return + default: + } + + filesToDeleteChan <- item + } + if output.Last { + terminationChan <- nil + return + } + input.Marker = output.NextMarker + } +} + +func deleteObjectWorker(container v3io.Container, deleteParams *DeleteParams, logger logger.Logger, + filesToDeleteChannel <-chan v3io.Item, terminationChan chan<- error, onErrorTerminationChannel <-chan struct{}, + aggrMask aggregate.AggrType) { + for { + select { + case _ = <-onErrorTerminationChannel: + return + case itemToDelete, ok := <-filesToDeleteChannel: + if !ok { + terminationChan <- nil + return + } + + currentPartition := itemToDelete.GetField("partition").(*partmgr.DBPartition) + fileName, err := itemToDelete.GetFieldString(config.ObjectNameAttrName) + if err != nil { + terminationChan <- err + return + } + fullFileName := pathUtil.Join(currentPartition.GetTablePath(), fileName) + + // Delete whole object + if deleteParams.From <= currentPartition.GetStartTime() && + deleteParams.To >= currentPartition.GetEndTime() { + + logger.Debug("delete entire item '%v' ", fullFileName) + input := &v3io.DeleteObjectInput{Path: fullFileName} + err = container.DeleteObjectSync(input) + if err != nil && !utils.IsNotExistsOrConflictError(err) { + terminationChan <- err + return + } + // Delete partial object - specific chunks or sub-parts of chunks + } else { + mtimeSecs, err := itemToDelete.GetFieldInt(config.MtimeSecsAttributeName) + if err != nil { + terminationChan <- err + return + } + mtimeNSecs, err := itemToDelete.GetFieldInt(config.MtimeNSecsAttributeName) + if err != nil { + terminationChan <- err + return + } + + deleteUpdateExpression := strings.Builder{} + dataEncoding, err := getEncoding(itemToDelete) + if err != nil { + terminationChan <- err + return + } + + var aggregationsByBucket map[int]*aggregate.AggregatesList + if aggrMask != 0 { + aggregationsByBucket = make(map[int]*aggregate.AggregatesList) + aggrBuckets := currentPartition.Times2BucketRange(deleteParams.From, deleteParams.To) + for _, bucketID := range aggrBuckets { + aggregationsByBucket[bucketID] = aggregate.NewAggregatesList(aggrMask) + } + } + + var newMaxTime int64 = math.MaxInt64 + var numberOfExpressionsInUpdate int + for attributeName, value := range itemToDelete { + if strings.HasPrefix(attributeName, "_v") { + // Check whether the whole chunk attribute needed to be deleted or just part of it. + if currentPartition.IsChunkInRangeByAttr(attributeName, deleteParams.From, deleteParams.To) { + deleteUpdateExpression.WriteString("delete(") + deleteUpdateExpression.WriteString(attributeName) + deleteUpdateExpression.WriteString(");") + } else { + currentChunksMaxTime, err := generatePartialChunkDeleteExpression(logger, &deleteUpdateExpression, attributeName, + value.([]byte), dataEncoding, deleteParams, currentPartition, aggregationsByBucket) + if err != nil { + terminationChan <- err + return + } + + // We want to save the earliest max time possible + if currentChunksMaxTime < newMaxTime { + newMaxTime = currentChunksMaxTime + } + } + numberOfExpressionsInUpdate++ + } + } + + dbMaxTime := int64(itemToDelete.GetField(config.MaxTimeAttrName).(int)) + + // Update the partition's max time if needed. 
+ if deleteParams.From < dbMaxTime && deleteParams.To >= dbMaxTime { + if deleteParams.From < newMaxTime { + newMaxTime = deleteParams.From + } + + deleteUpdateExpression.WriteString(fmt.Sprintf("%v=%v;", config.MaxTimeAttrName, newMaxTime)) + } + + if deleteUpdateExpression.Len() > 0 { + // If there are server aggregates, update the needed buckets + if aggrMask != 0 { + for bucket, aggregations := range aggregationsByBucket { + numberOfExpressionsInUpdate = numberOfExpressionsInUpdate + len(*aggregations) + + // Due to engine limitation, If we reached maximum number of expressions in an UpdateItem + // we need to break the update into chunks + // TODO: refactor in 2.8: + // in 2.8 there is a better way of doing it by uniting multiple update expressions into + // one expression by range in a form similar to `_v_sum[15...100]=0` + if numberOfExpressionsInUpdate < maxExpressionsInUpdateItem { + deleteUpdateExpression.WriteString(aggregations.SetExpr("v", bucket)) + } else { + exprStr := deleteUpdateExpression.String() + logger.Debug("delete item '%v' with expression '%v'", fullFileName, exprStr) + mtimeSecs, mtimeNSecs, err = sendUpdateItem(fullFileName, exprStr, mtimeSecs, mtimeNSecs, container) + if err != nil { + terminationChan <- err + return + } + + // Reset stuff for next update iteration + numberOfExpressionsInUpdate = 0 + deleteUpdateExpression.Reset() + } + } + } + + // If any expressions are left, save them + if deleteUpdateExpression.Len() > 0 { + exprStr := deleteUpdateExpression.String() + logger.Debug("delete item '%v' with expression '%v'", fullFileName, exprStr) + _, _, err = sendUpdateItem(fullFileName, exprStr, mtimeSecs, mtimeNSecs, container) + if err != nil { + terminationChan <- err + return + } + } + } + } + } + } +} + +func sendUpdateItem(path, expr string, mtimeSecs, mtimeNSecs int, container v3io.Container) (int, int, error) { + condition := fmt.Sprintf("%v == %v and %v == %v", + config.MtimeSecsAttributeName, mtimeSecs, + config.MtimeNSecsAttributeName, mtimeNSecs) + + input := &v3io.UpdateItemInput{Path: path, + Expression: &expr, + Condition: condition} + + response, err := container.UpdateItemSync(input) + if err != nil && !utils.IsNotExistsOrConflictError(err) { + returnError := err + if isFalseConditionError(err) { + returnError = errors.Wrapf(err, "Item '%v' was updated while deleting occurred. Please disable any ingestion and retry.", path) + } + return 0, 0, returnError + } + + output := response.Output.(*v3io.UpdateItemOutput) + return output.MtimeSecs, output.MtimeNSecs, nil +} + +func getEncoding(itemToDelete v3io.Item) (chunkenc.Encoding, error) { + var encoding chunkenc.Encoding + encodingStr, ok := itemToDelete.GetField(config.EncodingAttrName).(string) + // If we don't have the encoding attribute, use XOR as default. 
(for backwards compatibility) + if !ok { + encoding = chunkenc.EncXOR + } else { + intEncoding, err := strconv.Atoi(encodingStr) + if err != nil { + return 0, fmt.Errorf("error parsing encoding type of chunk, got: %v, error: %v", encodingStr, err) + } + encoding = chunkenc.Encoding(intEncoding) + } + + return encoding, nil +} + +func generatePartialChunkDeleteExpression(logger logger.Logger, expr *strings.Builder, + attributeName string, value []byte, encoding chunkenc.Encoding, deleteParams *DeleteParams, + partition *partmgr.DBPartition, aggregationsByBucket map[int]*aggregate.AggregatesList) (int64, error) { + chunk, err := chunkenc.FromData(logger, encoding, value, 0) + if err != nil { + return 0, err + } + + newChunk := chunkenc.NewChunk(logger, encoding == chunkenc.EncVariant) + appender, err := newChunk.Appender() + if err != nil { + return 0, err + } + + var currentMaxTime int64 + var remainingItemsCount int + iter := chunk.Iterator() + for iter.Next() { + var t int64 + var v interface{} + if encoding == chunkenc.EncXOR { + t, v = iter.At() + } else { + t, v = iter.AtString() + } + + // Append back only events that are not in the delete range + if t < deleteParams.From || t > deleteParams.To { + remainingItemsCount++ + appender.Append(t, v) + + // Calculate server-side aggregations + if aggregationsByBucket != nil { + currentAgg, ok := aggregationsByBucket[partition.Time2Bucket(t)] + // A chunk may contain more data then needed for the aggregations, if this is the case do not aggregate + if ok { + currentAgg.Aggregate(t, v) + } + } + + // Update current chunk's new max time + if t > currentMaxTime { + currentMaxTime = t + } + } + } + + if remainingItemsCount == 0 { + expr.WriteString("delete(") + expr.WriteString(attributeName) + expr.WriteString(");") + currentMaxTime, _ = partition.GetChunkStartTimeByAttr(attributeName) + } else { + bytes := appender.Chunk().Bytes() + val := base64.StdEncoding.EncodeToString(bytes) + + expr.WriteString(fmt.Sprintf("%s=blob('%s'); ", attributeName, val)) + } + + return currentMaxTime, nil + +} + +// Return the number of items in a TSDB table +func (a *V3ioAdapter) CountMetrics(part string) (int, error) { + count := 0 + paths := a.partitionMngr.GetPartitionsPaths() + for _, path := range paths { + input := v3io.GetItemsInput{Path: path, Filter: "", AttributeNames: []string{"__size"}} + iter, err := utils.NewAsyncItemsCursor(a.container, &input, a.cfg.QryWorkers, []string{}, a.logger) + if err != nil { + return 0, err + } + + for iter.Next() { + count++ + } + if iter.Err() != nil { + return count, errors.Wrap(iter.Err(), "Failed on count iterator.") + } + } + + return count, nil +} + +type v3ioAppender struct { + metricsCache *appender.MetricsCache +} + +// Add a t/v value to a metric item and return refID (for AddFast) +func (a v3ioAppender) Add(lset utils.Labels, t int64, v interface{}) (uint64, error) { + return a.metricsCache.Add(lset, t, v) +} + +// Faster Add using refID obtained from Add (avoid some hash/lookup overhead) +func (a v3ioAppender) AddFast(lset utils.Labels, ref uint64, t int64, v interface{}) error { + return a.metricsCache.AddFast(ref, t, v) +} + +// Wait for completion of all updates +func (a v3ioAppender) WaitForCompletion(timeout time.Duration) (int, error) { + return a.metricsCache.WaitForCompletion(timeout) +} + +func (a v3ioAppender) Close() { + a.metricsCache.Close() +} + +// In V3IO, all operations are committed (no client cache) +func (a v3ioAppender) Commit() error { return nil } +func (a v3ioAppender) Rollback() error 
{ return nil } + +// The Appender interface provides batched appends against a storage. +type Appender interface { + Add(l utils.Labels, t int64, v interface{}) (uint64, error) + AddFast(l utils.Labels, ref uint64, t int64, v interface{}) error + WaitForCompletion(timeout time.Duration) (int, error) + Commit() error + Rollback() error + Close() +} + +// Check if the current error was caused specifically because the condition was evaluated to false. +func isFalseConditionError(err error) bool { + errString := err.Error() + + if strings.Count(errString, errorCodeString) == 2 && + strings.Contains(errString, falseConditionOuterErrorCode) && + strings.Contains(errString, falseConditionInnerErrorCode) { + return true + } + + return false +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb_integration_test.go new file mode 100644 index 00000000..af0888ae --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb_integration_test.go @@ -0,0 +1,1133 @@ +// +build integration + +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. +*/ + +package tsdb_test + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-tsdb/pkg/aggregate" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + . 
"github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/schema" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/testutils" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +const defaultStepMs = 5 * tsdbtest.MinuteInMillis // 5 minutes + +func TestIngestData(t *testing.T) { + timestamp := fmt.Sprintf("%d", time.Now().Unix()) //time.Now().Format(time.RFC3339) + testCases := []struct { + desc string + params tsdbtest.TestParams + }{ + {desc: "Should ingest one data point", + params: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510, Value: 314.3}}, + }}}, + ), + }, + {desc: "Should ingest multiple data points", + params: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510, Value: 314.3}, + {Time: 1532940510 + 5, Value: 300.3}, + {Time: 1532940510 + 10, Value: 3234.6}}, + }}}, + ), + }, + {desc: "Should ingest record with late arrival same chunk", + params: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510, Value: 314.3}, + {Time: 1532940510 + 5, Value: 300.3}, + {Time: 1532940510 - 10, Value: 3234.6}}, + }}}, + ), + }, + {desc: "Should ingest into first partition in epoch without corruption (TSDB-67)", + params: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "coolcpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 10, Value: 314.3}, + }, + }}}, + ), + }, + {desc: "Should drop values of incompatible data types ", + params: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "IG13146", + Labels: utils.LabelsFromStringList("test", "IG-13146", "float", "string"), + Data: []tsdbtest.DataPoint{ + {Time: 15, Value: 0.1}, // first add float value + {Time: 20, Value: "some string value"}, // then attempt to add string value + {Time: 30, Value: 0.2}, // and finally add another float value + }, + ExpectedCount: func() *int { var expectedCount = 2; return &expectedCount }(), + }}}, + tsdbtest.TestOption{ + Key: "override_test_name", + Value: fmt.Sprintf("IG-13146-%s", timestamp)}), + }, + {desc: "IG-13146: Should reject values of incompatible data types without data corruption", + params: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "IG13146", + Labels: utils.LabelsFromStringList("test", "IG-13146", "float", "string"), + Data: []tsdbtest.DataPoint{ + {Time: 50, Value: "another string value"}, // then attempt to add string value + {Time: 60, Value: 0.4}, // valid values from this batch will be dropped + {Time: 70, Value: 0.3}, // because processing of entire batch will stop + }, + ExpectedCount: func() *int { var expectedCount = 1; return &expectedCount }(), + }}}, + tsdbtest.TestOption{ + 
Key: "override_test_name", + Value: fmt.Sprintf("IG-13146-%s", timestamp)}, + tsdbtest.TestOption{ + Key: "expected_error_contains_string", + // Note, the expected error message should align with pkg/appender/ingest.go:308 + Value: "trying to ingest values of incompatible data type"}), + }, + } + + for _, test := range testCases { + t.Run(test.desc, func(t *testing.T) { + if test.params.IgnoreReason() != "" { + t.Skip(test.params.IgnoreReason()) + } + testIngestDataCase(t, test.params) + }) + } +} + +func testIngestDataCase(t *testing.T, testParams tsdbtest.TestParams) { + defer tsdbtest.SetUp(t, testParams)() + + adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + if err != nil { + t.Fatalf("Failed to create v3io adapter. reason: %s", err) + } + + appender, err := adapter.Appender() + if err != nil { + t.Fatalf("Failed to get appender. reason: %s", err) + } + + for _, dp := range testParams.TimeSeries() { + sort.Sort(tsdbtest.DataPointTimeSorter(dp.Data)) + from := dp.Data[0].Time + to := dp.Data[len(dp.Data)-1].Time + + labels := utils.Labels{utils.Label{Name: "__name__", Value: dp.Name}} + labels = append(labels, dp.Labels...) + + ref, err := appender.Add(labels, dp.Data[0].Time, dp.Data[0].Value) + if err != nil { + t.Fatalf("Failed to add data to appender. reason: %s", err) + } + for _, curr := range dp.Data[1:] { + appender.AddFast(labels, ref, curr.Time, curr.Value) + } + + if _, err := appender.WaitForCompletion(0); err != nil { + if !isExpected(testParams, err) { + t.Fatalf("Failed to wait for appender completion. reason: %s", err) + } + } + + expectedCount := len(dp.Data) + if dp.ExpectedCount != nil { + expectedCount = *dp.ExpectedCount + } + tsdbtest.ValidateCountOfSamples(t, adapter, dp.Name, expectedCount, from, to, -1) + } +} + +func isExpected(testParams tsdbtest.TestParams, actualErr error) bool { + if errMsg, ok := testParams["expected_error_contains_string"]; ok { + return strings.Contains(actualErr.Error(), fmt.Sprintf("%v", errMsg)) + } + return false +} + +func TestIngestDataWithSameTimestamp(t *testing.T) { + baseTime := int64(1532209200000) + testParams := tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: []tsdbtest.DataPoint{ + {Time: baseTime, Value: 1}, + {Time: baseTime, Value: 2}}}, + tsdbtest.Metric{ + Name: "cpu1", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: []tsdbtest.DataPoint{ + {Time: baseTime, Value: 2}, {Time: baseTime, Value: 3}}}, + }}) + + defer tsdbtest.SetUp(t, testParams)() + + adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + if err != nil { + t.Fatalf("Failed to create v3io adapter. reason: %s", err) + } + + appender, err := adapter.Appender() + if err != nil { + t.Fatalf("Failed to get appender. reason: %s", err) + } + + for _, dp := range testParams.TimeSeries() { + labels := utils.Labels{utils.Label{Name: "__name__", Value: dp.Name}} + labels = append(labels, dp.Labels...) + + ref, err := appender.Add(labels, dp.Data[0].Time, dp.Data[0].Value) + if err != nil { + t.Fatalf("Failed to add data to appender. reason: %s", err) + } + + for _, curr := range dp.Data[1:] { + appender.AddFast(labels, ref, curr.Time, curr.Value) + } + + if _, err := appender.WaitForCompletion(0); err != nil { + t.Fatalf("Failed to wait for appender completion. 
reason: %s", err) + } + } + + tsdbtest.ValidateCountOfSamples(t, adapter, "", 2, baseTime-1*tsdbtest.HoursInMillis, baseTime+1*tsdbtest.HoursInMillis, -1) +} + +// test for http://jira.iguazeng.com:8080/browse/IG-14978 +func TestIngestWithTimeDeltaBiggerThen32Bit(t *testing.T) { + data := []tsdbtest.DataPoint{ + {Time: 1384786967945, Value: 1.0}, + {Time: 1392818567945, Value: 2.0}} + testParams := tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: data}, + }}) + + schema, err := schema.NewSchema(testParams.V3ioConfig(), "1/h", "1h", "", "") + defer tsdbtest.SetUpWithDBConfig(t, schema, testParams)() + + adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + if err != nil { + t.Fatalf("Failed to create v3io adapter. reason: %s", err) + } + + appender, err := adapter.Appender() + if err != nil { + t.Fatalf("Failed to get appender. reason: %s", err) + } + + for _, dp := range testParams.TimeSeries() { + labels := utils.Labels{utils.Label{Name: "__name__", Value: dp.Name}} + labels = append(labels, dp.Labels...) + + ref, err := appender.Add(labels, dp.Data[0].Time, dp.Data[0].Value) + if err != nil { + t.Fatalf("Failed to add data to appender. reason: %s", err) + } + + for _, curr := range dp.Data[1:] { + appender.AddFast(labels, ref, curr.Time, curr.Value) + } + + if _, err := appender.WaitForCompletion(0); err != nil { + t.Fatalf("Failed to wait for appender completion. reason: %s", err) + } + } + + querier, _ := adapter.QuerierV2() + iter, _ := querier.Select(&pquerier.SelectParams{From: 0, + To: time.Now().Unix() * 1000}) + for iter.Next() { + dataIter := iter.At().Iterator() + actual, err := iteratorToSlice(dataIter) + if err != nil { + t.Fatal(err) + } + + assert.ElementsMatch(t, data, actual, + "result data didn't match. \nExpected: %v\n Actual: %v", data, actual) + } + + if iter.Err() != nil { + t.Fatal(err) + } +} + +func TestIngestVarTypeWithTimeDeltaBiggerThen32Bit(t *testing.T) { + data := []string{"a", "b"} + times := []int64{1384786967945, 1392818567945} + + testParams := tsdbtest.NewTestParams(t) + + schema, err := schema.NewSchema(testParams.V3ioConfig(), "1/h", "1h", "", "") + defer tsdbtest.SetUpWithDBConfig(t, schema, testParams)() + + adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + if err != nil { + t.Fatalf("Failed to create v3io adapter. reason: %s", err) + } + + appender, err := adapter.Appender() + if err != nil { + t.Fatalf("Failed to get appender. reason: %s", err) + } + + labels := utils.Labels{utils.Label{Name: "__name__", Value: "metric_1"}} + for i, v := range data { + _, err := appender.Add(labels, times[i], v) + if err != nil { + t.Fatalf("Failed to add data to appender. reason: %s", err) + } + + } + + if _, err := appender.WaitForCompletion(0); err != nil { + t.Fatalf("Failed to wait for appender completion. 
reason: %s", err) + } + + querier, _ := adapter.QuerierV2() + iter, _ := querier.Select(&pquerier.SelectParams{From: 0, + To: time.Now().Unix() * 1000}) + var seriesCount int + for iter.Next() { + seriesCount++ + iter := iter.At().Iterator() + var i int + for iter.Next() { + time, value := iter.AtString() + assert.Equal(t, times[i], time, "time does not match at index %v", i) + assert.Equal(t, data[i], value, "value does not match at index %v", i) + i++ + } + } + + assert.Equal(t, 1, seriesCount, "series count didn't match expected") + + if iter.Err() != nil { + t.Fatal(err) + } +} + +func TestWriteMetricWithDashInName(t *testing.T) { + testParams := tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu-1", + Labels: utils.LabelsFromStringList("testLabel", "balbala"), + Data: []tsdbtest.DataPoint{{Time: 1532940510, Value: 314.3}}, + }}}) + defer tsdbtest.SetUp(t, testParams)() + + adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + if err != nil { + t.Fatalf("Failed to create v3io adapter. reason: %s", err) + } + + appender, err := adapter.Appender() + if err != nil { + t.Fatalf("Failed to get appender. reason: %s", err) + } + for _, dp := range testParams.TimeSeries() { + labels := utils.Labels{utils.Label{Name: "__name__", Value: dp.Name}} + labels = append(labels, dp.Labels...) + + _, err := appender.Add(labels, dp.Data[0].Time, dp.Data[0].Value) + if err == nil { + t.Fatalf("Test should have failed") + } + } +} + +func TestQueryData(t *testing.T) { + testCases := []struct { + desc string + testParams tsdbtest.TestParams + filter string + aggregates string + from int64 + to int64 + step int64 + expected map[string][]tsdbtest.DataPoint + ignoreReason string + expectFail bool + }{ + {desc: "Should ingest and query one data point", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("testLabel", "balbala"), + Data: []tsdbtest.DataPoint{{Time: 1532940510, Value: 314.3}}, + }}}, + ), + from: 0, + to: 1532940510 + 1, + step: defaultStepMs, + expected: map[string][]tsdbtest.DataPoint{"": {{Time: 1532940510, Value: 314.3}}}}, + + {desc: "Should ingest and query multiple data points", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510 - 10, Value: 314.3}, + {Time: 1532940510 - 5, Value: 300.3}, + {Time: 1532940510, Value: 3234.6}}, + }}}, + ), + from: 0, + to: 1532940510 + 1, + step: defaultStepMs, + expected: map[string][]tsdbtest.DataPoint{"": {{Time: 1532940510 - 10, Value: 314.3}, + {Time: 1532940510 - 5, Value: 300.3}, + {Time: 1532940510, Value: 3234.6}}}}, + + {desc: "Should query with filter on metric name", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{{Time: 1532940510, Value: 33.3}}, + }}}, + ), + filter: "_name=='cpu'", + from: 0, + to: 1532940510 + 1, + step: defaultStepMs, + expected: map[string][]tsdbtest.DataPoint{"": {{Time: 1532940510, Value: 33.3}}}}, + + {desc: "Should query with filter on label name", + 
testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{{Time: 1532940510, Value: 31.3}}, + }}}, + ), + filter: "os=='linux'", + from: 0, + to: 1532940510 + 1, + step: defaultStepMs, + expected: map[string][]tsdbtest.DataPoint{"": {{Time: 1532940510, Value: 31.3}}}}, + + {desc: "Should ingest and query by time", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510, Value: 314.3}, + {Time: 1532940510 + 5, Value: 300.3}, + {Time: 1532940510 + 10, Value: 3234.6}}, + }}}, + ), + from: 1532940510 + 2, + to: 1532940510 + 12, + step: defaultStepMs, + expected: map[string][]tsdbtest.DataPoint{"": {{Time: 1532940510 + 5, Value: 300.3}, + {Time: 1532940510 + 10, Value: 3234.6}}}}, + + {desc: "Should ingest and query by time with no results", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510, Value: 314.3}, + {Time: 1532940510 + 5, Value: 300.3}, + {Time: 1532940510 + 10, Value: 3234.6}}, + }}}, + ), + from: 1532940510 + 1, + to: 1532940510 + 4, + step: defaultStepMs, + expected: map[string][]tsdbtest.DataPoint{}}, + + {desc: "Should ingest and query an aggregate", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510, Value: 300.3}, + {Time: 1532940510 + 5, Value: 300.3}, + {Time: 1532940510 + 10, Value: 100.4}}, + }}}, + ), + from: 1532940510, + to: 1532940510 + 11, + step: defaultStepMs, + aggregates: "sum", + expected: map[string][]tsdbtest.DataPoint{"sum": {{Time: 1532940510, Value: 701.0}}}}, + + {desc: "Should ingest and query an aggregate with interval greater than step size", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510, Value: 300.3}, + {Time: 1532940510 + 60, Value: 300.3}, + {Time: 1532940510 + 2*60, Value: 100.4}, + {Time: 1532940510 + 5*60, Value: 200.0}}, + }}}, + ), + from: 1532940510, + to: 1532940510 + 6*60, + step: defaultStepMs, + aggregates: "sum", + expected: map[string][]tsdbtest.DataPoint{"sum": {{Time: 1532940510, Value: 901.0}}}}, + + {desc: "Should ingest and query multiple aggregates", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510, Value: 300.3}, + {Time: 1532940510 + 5, Value: 300.3}, + {Time: 1532940510 + 10, Value: 100.4}}, + }}}, + ), + from: 1532940510, + to: 1532940510 + 11, + step: defaultStepMs, + aggregates: "sum,count", + expected: 
map[string][]tsdbtest.DataPoint{"sum": {{Time: 1532940510, Value: 701.0}}, + "count": {{Time: 1532940510, Value: 3}}}}, + + {desc: "Should fail on query with illegal time (switch from and to)", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510, Value: 314.3}, + {Time: 1532940510 + 5, Value: 300.3}, + {Time: 1532940510 + 10, Value: 3234.6}}, + }}}, + ), + from: 1532940510 + 1, + to: 0, + step: defaultStepMs, + expectFail: true, + }, + + {desc: "Should query with filter on not existing metric name", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{{Time: 1532940510, Value: 33.3}}, + }}}, + ), + filter: "_name=='hahaha'", + from: 0, + to: 1532940510 + 1, + step: defaultStepMs, + expected: map[string][]tsdbtest.DataPoint{}}, + + {desc: "Should ingest and query aggregates with empty bucket", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1537972278402, Value: 300.3}, + {Time: 1537972278402 + 8*tsdbtest.MinuteInMillis, Value: 300.3}, + {Time: 1537972278402 + 9*tsdbtest.MinuteInMillis, Value: 100.4}}, + }}}, + ), + from: 1537972278402 - 5*tsdbtest.MinuteInMillis, + to: 1537972278402 + 10*tsdbtest.MinuteInMillis, + step: defaultStepMs, + aggregates: "count", + expected: map[string][]tsdbtest.DataPoint{ + "count": {{Time: 1537972278402, Value: 1}, + {Time: 1537972578402, Value: 2}}}}, + + {desc: "Should ingest and query aggregates with few empty buckets in a row", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1537972278402, Value: 300.3}, + {Time: 1537972278402 + 16*tsdbtest.MinuteInMillis, Value: 300.3}, + {Time: 1537972278402 + 17*tsdbtest.MinuteInMillis, Value: 100.4}}, + }}}, + ), + from: 1537972278402 - 5*tsdbtest.MinuteInMillis, + to: 1537972278402 + 18*tsdbtest.MinuteInMillis, + step: defaultStepMs, + aggregates: "count", + expected: map[string][]tsdbtest.DataPoint{ + "count": {{Time: 1537972158402, Value: 1}, + {Time: 1537973058402, Value: 2}}}}, + + {desc: "Should ingest and query server-side aggregates", + testParams: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + Data: []tsdbtest.DataPoint{ + {Time: 1532940510, Value: 300.3}, + {Time: 1532940510 + 5, Value: 300.3}, + {Time: 1532940510 + 10, Value: 100.4}}, + }}}, + ), + from: 1532940510, + to: 1532940510 + 11, + step: 60 * tsdbtest.MinuteInMillis, + aggregates: "sum,count,min,max,sqr,last", + expected: map[string][]tsdbtest.DataPoint{"sum": {{Time: 1532940510, Value: 701.0}}, + "count": {{Time: 1532940510, Value: 3}}, + "min": {{Time: 1532940510, Value: 100.4}}, + "max": {{Time: 1532940510, Value: 
300.3}}, + "sqr": {{Time: 1532940510, Value: 190440.3}}, + "last": {{Time: 1532940510, Value: 100.4}}}}, + } + + for _, test := range testCases { + t.Run(test.desc, func(t *testing.T) { + if test.ignoreReason != "" { + t.Skip(test.ignoreReason) + } + testQueryDataCase(t, test.testParams, test.filter, test.aggregates, test.from, test.to, test.step, test.expected, test.expectFail) + }) + } +} + +func testQueryDataCase(test *testing.T, testParams tsdbtest.TestParams, filter string, queryAggregates string, + from int64, to int64, step int64, expected map[string][]tsdbtest.DataPoint, expectFail bool) { + + adapter, teardown := tsdbtest.SetUpWithData(test, testParams) + defer teardown() + + qry, err := adapter.Querier(nil, from, to) + if err != nil { + if expectFail { + return + } else { + test.Fatalf("Failed to create Querier. reason: %v", err) + } + } + + for _, metric := range testParams.TimeSeries() { + set, err := qry.Select(metric.Name, queryAggregates, step, filter) + if err != nil { + test.Fatalf("Failed to run Select. reason: %v", err) + } + + var counter int + for counter = 0; set.Next(); counter++ { + if set.Err() != nil { + test.Fatalf("Failed to query metric. reason: %v", set.Err()) + } + + series := set.At() + currentAggregate := series.Labels().Get(aggregate.AggregateLabel) + iter := series.Iterator() + if iter.Err() != nil { + test.Fatalf("Failed to query data series. reason: %v", iter.Err()) + } + + actual, err := iteratorToSlice(iter) + if err != nil { + test.Fatal(err) + } + + for _, data := range expected[currentAggregate] { + var equalCount = 0 + for _, dp := range actual { + if dp.Equals(data) { + equalCount++ + continue + } + } + assert.Equal(test, equalCount, len(expected[currentAggregate]), + "Check failed for aggregate='%s'. Query aggregates: %s", currentAggregate, queryAggregates) + } + } + + if set.Err() != nil { + test.Fatalf("Failed to query metric. reason: %v", set.Err()) + } + if counter == 0 && len(expected) > 0 { + test.Fatalf("No data was received") + } + } +} + +func TestQueryDataOverlappingWindow(t *testing.T) { + v3ioConfig, err := config.GetOrDefaultConfig() + if err != nil { + t.Fatalf("unable to load configuration. 
Error: %v", err) + } + + testCases := []struct { + desc string + metricName string + labels []utils.Label + data []tsdbtest.DataPoint + filter string + aggregates string + windows []int + from int64 + to int64 + expected map[string][]tsdbtest.DataPoint + ignoreReason string + }{ + {desc: "Should ingest and query with windowing", + metricName: "cpu", + labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + data: []tsdbtest.DataPoint{{Time: 1532940510, Value: 314.3}, + {Time: 1532944110, Value: 314.3}, + {Time: 1532947710, Value: 300.3}, + {Time: 1532951310, Value: 3234.6}}, + from: 0, to: 1532954910, + windows: []int{1, 2, 4}, + aggregates: "sum", + expected: map[string][]tsdbtest.DataPoint{ + "sum": {{Time: 1532937600, Value: 4163.5}, + {Time: 1532944800, Value: 3534.9}, + {Time: 1532948400, Value: 3234.6}}}, + }, + + {desc: "Should ingest and query with windowing on multiple agg", + metricName: "cpu", + labels: utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease"), + data: []tsdbtest.DataPoint{{Time: 1532940510, Value: 314.3}, + {Time: 1532944110, Value: 314.3}, + {Time: 1532947710, Value: 300.3}, + {Time: 1532951310, Value: 3234.6}}, + from: 0, to: 1532954910, + windows: []int{1, 2, 4}, + aggregates: "sum,count,sqr", + expected: map[string][]tsdbtest.DataPoint{ + "sum": {{Time: 1532937600, Value: 4163.5}, + {Time: 1532944800, Value: 3534.9}, + {Time: 1532948400, Value: 3234.6}}, + "count": {{Time: 1532937600, Value: 4}, + {Time: 1532944800, Value: 2}, + {Time: 1532948400, Value: 1}}, + "sqr": {{Time: 1532937600, Value: 10750386.23}, + {Time: 1532944800, Value: 10552817.25}, + {Time: 1532948400, Value: 10462637.16}}, + }, + }, + } + + for _, test := range testCases { + t.Run(test.desc, func(t *testing.T) { + if test.ignoreReason != "" { + t.Skip(test.ignoreReason) + } + testQueryDataOverlappingWindowCase(t, v3ioConfig, test.metricName, test.labels, + test.data, test.filter, test.windows, test.aggregates, test.from, test.to, test.expected) + }) + } +} + +func testQueryDataOverlappingWindowCase(test *testing.T, v3ioConfig *config.V3ioConfig, + metricsName string, userLabels []utils.Label, data []tsdbtest.DataPoint, filter string, + windows []int, agg string, + from int64, to int64, expected map[string][]tsdbtest.DataPoint) { + + testParams := tsdbtest.NewTestParams(test, + tsdbtest.TestOption{Key: tsdbtest.OptV3ioConfig, Value: v3ioConfig}, + tsdbtest.TestOption{Key: tsdbtest.OptTimeSeries, Value: tsdbtest.TimeSeries{tsdbtest.Metric{Name: metricsName, Data: data, Labels: userLabels}}}, + ) + + adapter, teardown := tsdbtest.SetUpWithData(test, testParams) + defer teardown() + + var step int64 = 3600 + + qry, err := adapter.Querier(nil, from, to) + if err != nil { + test.Fatalf("Failed to create Querier. reason: %v", err) + } + + set, err := qry.SelectOverlap(metricsName, agg, step, windows, filter) + if err != nil { + test.Fatalf("Failed to run Select. reason: %v", err) + } + + var counter int + for counter = 0; set.Next(); counter++ { + if set.Err() != nil { + test.Fatalf("Failed to query metric. reason: %v", set.Err()) + } + + series := set.At() + agg := series.Labels().Get(aggregate.AggregateLabel) + iter := series.Iterator() + if iter.Err() != nil { + test.Fatalf("Failed to query data series. 
reason: %v", iter.Err()) + } + + actual, err := tsdbtest.IteratorToSlice(iter) + if err != nil { + test.Fatal(err) + } + assert.EqualValues(test, len(windows), len(actual)) + for _, data := range expected[agg] { + var equalCount = 0 + for _, dp := range actual { + if dp.Equals(data) { + equalCount++ + continue + } + } + assert.Equal(test, equalCount, len(expected[agg])) + } + } + + if set.Err() != nil { + test.Fatalf("Failed to query metric. reason: %v", set.Err()) + } + if counter == 0 && len(expected) > 0 { + test.Fatalf("No data was received") + } +} + +// Calling Seek instead of next for the first time while iterating over data (TSDB-43) +func TestIgnoreNaNWhenSeekingAggSeries(t *testing.T) { + v3ioConfig, err := tsdbtest.LoadV3ioConfig() + if err != nil { + t.Fatalf("unable to load configuration. Error: %v", err) + } + metricsName := "cpu" + baseTime := int64(1532940510000) + userLabels := utils.LabelsFromStringList("os", "linux", "iguaz", "yesplease") + data := []tsdbtest.DataPoint{{Time: baseTime, Value: 300.3}, + {Time: baseTime + tsdbtest.MinuteInMillis, Value: 300.3}, + {Time: baseTime + 2*tsdbtest.MinuteInMillis, Value: 100.4}, + {Time: baseTime + 5*tsdbtest.MinuteInMillis, Value: 200.0}} + from := int64(baseTime - 60*tsdbtest.MinuteInMillis) + to := int64(baseTime + 6*tsdbtest.MinuteInMillis) + step := int64(2 * tsdbtest.MinuteInMillis) + agg := "avg" + expected := map[string][]tsdbtest.DataPoint{ + "avg": {{baseTime, 300.3}, + {baseTime + step, 100.4}, + {baseTime + 2*step, 200}}} + + testParams := tsdbtest.NewTestParams(t, + tsdbtest.TestOption{Key: tsdbtest.OptV3ioConfig, Value: v3ioConfig}, + tsdbtest.TestOption{Key: tsdbtest.OptTimeSeries, Value: tsdbtest.TimeSeries{tsdbtest.Metric{Name: metricsName, Data: data, Labels: userLabels}}}, + ) + + adapter, teardown := tsdbtest.SetUpWithData(t, testParams) + defer teardown() + + qry, err := adapter.Querier(nil, from, to) + if err != nil { + t.Fatalf("Failed to create Querier. reason: %v", err) + } + + set, err := qry.Select(metricsName, agg, step, "") + if err != nil { + t.Fatalf("Failed to run Select. reason: %v", err) + } + + var counter int + for counter = 0; set.Next(); counter++ { + if set.Err() != nil { + t.Fatalf("Failed to query metric. reason: %v", set.Err()) + } + + series := set.At() + agg := series.Labels().Get(aggregate.AggregateLabel) + iter := series.Iterator() + if iter.Err() != nil { + t.Fatalf("Failed to query data series. reason: %v", iter.Err()) + } + if !iter.Seek(0) { + t.Fatal("Seek time returned false, iterator error:", iter.Err()) + } + var actual []tsdbtest.DataPoint + t0, v0 := iter.At() + if iter.Err() != nil { + t.Fatal("error iterating over series", iter.Err()) + } + actual = append(actual, tsdbtest.DataPoint{Time: t0, Value: v0}) + for iter.Next() { + t1, v1 := iter.At() + + if iter.Err() != nil { + t.Fatal("error iterating over series", iter.Err()) + } + actual = append(actual, tsdbtest.DataPoint{Time: t1, Value: v1}) + } + + for _, data := range expected[agg] { + var equalCount = 0 + for _, dp := range actual { + if dp.Equals(data) { + equalCount++ + continue + } + } + assert.Equal(t, equalCount, len(expected[agg])) + } + } + + if set.Err() != nil { + t.Fatalf("Failed to query metric. 
reason: %v", set.Err()) + } + if counter == 0 && len(expected) > 0 { + t.Fatalf("No data was received") + } +} + +func TestCreateTSDB(t *testing.T) { + testCases := []struct { + desc string + conf *config.Schema + ignoreReason string + }{ + {desc: "Should create TSDB with standard configuration", conf: testutils.CreateSchema(t, "sum,count")}, + + {desc: "Should create TSDB with wildcard aggregations", conf: testutils.CreateSchema(t, "*")}, + } + + testParams := tsdbtest.NewTestParams(t) + + for _, test := range testCases { + t.Run(test.desc, func(t *testing.T) { + if test.ignoreReason != "" { + t.Skip(test.ignoreReason) + } + testCreateTSDBcase(t, test.conf, testParams) + }) + } +} + +func testCreateTSDBcase(t *testing.T, dbConfig *config.Schema, testParams tsdbtest.TestParams) { + defer tsdbtest.SetUpWithDBConfig(t, dbConfig, testParams)() + + adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + if err != nil { + t.Fatalf("Failed to create adapter. reason: %s", err) + } + + actualDbConfig := adapter.GetSchema() + assert.Equal(t, actualDbConfig, dbConfig) +} + +func TestDeleteTSDB(t *testing.T) { + v3ioConfig, err := tsdbtest.LoadV3ioConfig() + if err != nil { + t.Fatalf("unable to load configuration. Error: %v", err) + } + + schema := testutils.CreateSchema(t, "count,sum") + v3ioConfig.TablePath = tsdbtest.PrefixTablePath(t.Name()) + if err := CreateTSDB(v3ioConfig, schema, nil); err != nil { + v3ioConfigAsJson, _ := json.MarshalIndent(v3ioConfig, "", " ") + t.Fatalf("Failed to create TSDB. Reason: %s\nConfiguration:\n%s", err, string(v3ioConfigAsJson)) + } + + adapter, err := NewV3ioAdapter(v3ioConfig, nil, nil) + if err != nil { + t.Fatalf("Failed to create v3io adapter. reason: %s", err) + } + responseChan := make(chan *v3io.Response) + container, _ := adapter.GetContainer() + _, err = container.GetContainerContents(&v3io.GetContainerContentsInput{Path: v3ioConfig.TablePath}, 30, responseChan) + if err != nil { + t.Fatal(err.Error()) + } + if res := <-responseChan; res.Error != nil { + t.Fatal(res.Error.Error()) + } + + if err := adapter.DeleteDB(DeleteParams{DeleteAll: true, IgnoreErrors: true}); err != nil { + t.Fatalf("Failed to delete DB on teardown. reason: %s", err) + } + + _, err = container.GetContainerContents(&v3io.GetContainerContentsInput{Path: v3ioConfig.TablePath}, 30, responseChan) + if err != nil { + t.Fatal(err.Error()) + } + if res := <-responseChan; res.Error == nil { + t.Fatal("Did not delete TSDB properly") + } +} + +func TestIngestDataFloatThenString(t *testing.T) { + testParams := tsdbtest.NewTestParams(t) + + defer tsdbtest.SetUp(t, testParams)() + + adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + if err != nil { + t.Fatalf("Failed to create v3io adapter. reason: %s", err) + } + + appender, err := adapter.Appender() + if err != nil { + t.Fatalf("Failed to get appender. reason: %s", err) + } + + labels := utils.Labels{utils.Label{Name: "__name__", Value: "cpu"}} + _, err = appender.Add(labels, 1532940510000, 12.0) + if err != nil { + t.Fatalf("Failed to add data to appender. reason: %s", err) + } + + _, err = appender.Add(labels, 1532940610000, "tal") + if err == nil { + t.Fatal("expected failure but finished successfully") + } + + if _, err := appender.WaitForCompletion(0); err != nil { + t.Fatalf("Failed to wait for appender completion. 
reason: %s", err) + } + + tsdbtest.ValidateCountOfSamples(t, adapter, "cpu", 1, 0, 1532950510000, -1) +} + +func TestIngestDataStringThenFloat(t *testing.T) { + testParams := tsdbtest.NewTestParams(t) + + defer tsdbtest.SetUp(t, testParams)() + + adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + if err != nil { + t.Fatalf("Failed to create v3io adapter. reason: %s", err) + } + + appender, err := adapter.Appender() + if err != nil { + t.Fatalf("Failed to get appender. reason: %s", err) + } + + labels := utils.Labels{utils.Label{Name: "__name__", Value: "cpu"}} + _, err = appender.Add(labels, 1532940510000, "tal") + if err != nil { + t.Fatalf("Failed to add data to appender. reason: %s", err) + } + + _, err = appender.Add(labels, 1532940610000, 666.0) + if err == nil { + t.Fatal("expected failure but finished successfully") + } + + if _, err := appender.WaitForCompletion(0); err != nil { + t.Fatalf("Failed to wait for appender completion. reason: %s", err) + } + + tsdbtest.ValidateCountOfSamples(t, adapter, "cpu", 1, 0, 1532950510000, -1) +} + +func iteratorToSlice(it chunkenc.Iterator) ([]tsdbtest.DataPoint, error) { + var result []tsdbtest.DataPoint + for it.Next() { + t, v := it.At() + if it.Err() != nil { + return nil, it.Err() + } + result = append(result, tsdbtest.DataPoint{Time: t, Value: v}) + } + return result, nil +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/add.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/add.go new file mode 100644 index 00000000..7b1090e0 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/add.go @@ -0,0 +1,327 @@ +/* +Copyright 2018 Iguazio Systems Ltd. + +Licensed under the Apache License, Version 2.0 (the "License") with +an addition restriction as set forth herein. You may not use this +file except in compliance with the License. You may obtain a copy of +the License at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. + +In addition, you may not use the software for any purposes that are +illegal under applicable law, and the grant of the foregoing license +under the Apache 2.0 license is conditioned upon your compliance with +such restriction. 
+*/ + +package tsdbctl + +import ( + "encoding/csv" + "fmt" + "io" + "os" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +const ArraySeparator = "," + +type addCommandeer struct { + cmd *cobra.Command + rootCommandeer *RootCommandeer + name string + lset string + tArr string + vArr string + inFile string + stdin bool + delay int +} + +func newAddCommandeer(rootCommandeer *RootCommandeer) *addCommandeer { + commandeer := &addCommandeer{ + rootCommandeer: rootCommandeer, + } + + cmd := &cobra.Command{ + Aliases: []string{"append"}, + Use: "add [] [] [flags]", + Short: "Add metric samples to a TSDB instance", + Long: `Add (ingest) metric samples into a TSDB instance (table).`, + Example: `The examples assume that the endpoint of the web-gateway service, the login credentials, and +the name of the data container are configured in the default configuration file (` + config.DefaultConfigurationFileName + `) +instead of using the -s|--server, -u|--username, -p|--password, and -c|--container flags. +- tsdbctl add temperature -t mytsdb -d 28 -m now-2h +- tsdbctl add http_req method=get -t mytsdb -d 99.9 +- tsdbctl add cpu "host=A,os=win" -t metrics-table -d "73.2,45.1" -m "1533026403000,now-1d" +- tsdbctl add -t perfstats -f ~/tsdb/tsdb_input.csv +- tsdbctl add log -t mytsdb -m now-2h -d "This thing has just happened" + +Notes: +- The command requires a metric name and one or more sample values. + You can provide this information either by using the argument and the -d|--values flag, + or by using the -f|--file flag to point to a CSV file that contains the required information. +- It is possible to ingest metrics containing string values, Though a single metric can contain either Floats or Strings, But not both. + +Arguments: + (string) The name of the metric for which to add samples. + The metric name must be provided either in this argument or in a + CSV file that is specified with the -f|--file flag. + (string) An optional list of labels to add, as a comma-separated list of + "
VersionDescription
2.14.0Add support to pogs for interface types (#66 and #74)
2.13.1Fix bug with far far pointers (#71), use writev system call to encode multi-segment messages efficiently in Go 1.8+ (#70), and add GitHub-Linguist-compatible code generation comment
2.13.0Add Conn.Done and Conn.Err methods
2.12.4Fix size of created List(Float32)
2.12.3Fix bugs from fuzz tests: mismatched element size on list access causing crashes (#59) and miscellaneous packed reader issues
2.12.2Fix another shutdown race condition (#54)
2.12.1Fix ownership bug with receiver-hosted capabilities, add discriminant check to HasField (#55), fix multi-segment bug for data/text lists, and use nulls for setting empty data/text
2.12.0Add rpc.ConnLog option and fix race conditions and edge cases in RPC implementation
2.11.1Fix packed reader behavior on certain readers (#49), add capnp.UnmarshalPacked function that performs faster, and reduce locking overhead of segment maps
2.11.0Fix shutdown deadlock in RPC shutdown (#45)
2.10.1Work around lack of support for RPC-level promise capabilities (#2)
2.10.0Add pogs package (#33)
2.9.1Fix not-found behavior in schemas and add missing group IDs in generated embedded schemas
2.9.0Add encoding/text package (#20)
2.8.0Reduce generated code size for text fields and correct NUL check
2.7.0Insert compressed schema data into generated code
2.6.1Strip NUL byte from TextList.BytesAt and fix capnpc-go output for struct groups
2.6.0Add packages for predefined Cap'n Proto schemas
2.5.1Fix capnpc-go regression (#29) and strip trailing NUL byte in TextBytes accessor
2.5.0Add NewFoo method for list fields in generated structs (#7)
2.4.0Add maximum segment limit (#25)
2.3.0Add depth and traversal limit security checks
2.2.1Fix data race in reading Message from multiple goroutines
2.2.0Add HasFoo pointer field methods to generated code (#24)
2.1.0Introduce Ptr type and reduce allocations in single-segment cases
2.0.2Allow allocation-less string field access via TextList.BytesAt() and StringBytes() (#17)
2.0.1Allow nil params in client wrappers (#9) and fix integer underflow on compare function (#12)
2.0.0First release under zombiezen.com/go/capnproto2
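Several of the entries above name core-API additions (for example capnp.UnmarshalPacked in 2.11.1 and the Ptr type in 2.1.0). As a rough, self-contained sketch of a packed round trip using only the core package, and assuming the Message.MarshalPacked/UnmarshalPacked pair works as this vendored snapshot suggests:

    package main

    import (
        "fmt"

        "zombiezen.com/go/capnproto2"
    )

    func main() {
        // Build a single-segment message whose root struct has one data word.
        msg, seg, err := capnp.NewMessage(capnp.SingleSegment(nil))
        if err != nil {
            panic(err)
        }
        root, err := capnp.NewRootStruct(seg, capnp.ObjectSize{DataSize: 8})
        if err != nil {
            panic(err)
        }
        root.SetUint64(0, 42)

        // Encode in the packed wire format, decode, and read the value back.
        data, err := msg.MarshalPacked()
        if err != nil {
            panic(err)
        }
        decoded, err := capnp.UnmarshalPacked(data)
        if err != nil {
            panic(err)
        }
        p, err := decoded.RootPtr()
        if err != nil {
            panic(err)
        }
        fmt.Println(p.Struct().Uint64(0)) // prints 42
    }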
diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/CONTRIBUTORS b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/CONTRIBUTORS new file mode 100644 index 00000000..bcdd5743 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/CONTRIBUTORS @@ -0,0 +1,33 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the go-capnproto repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# copyright belongs to the individual or the corporation. + +# Names should be added to this file like so: +# Individual's name +# Individual's name + +# Please keep the list sorted. + +Alan Braithwaite +Albert Strasheim +Daniel Darabos +Eran Duchan +Evan Shaw +Ian Denhardt +James McKaskill +Jason E. Aten +Johan Hernandez +Joonsung Lee +Lev Radomislensky +Peter Waldschmidt +Ross Light +Tom Thorogood +TJ Holowaychuk +William Laffin diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/LICENSE b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/LICENSE new file mode 100644 index 00000000..3e590a19 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/LICENSE @@ -0,0 +1,25 @@ +go-capnproto is licensed under the terms of the MIT license reproduced below. + +=============================================================================== + +Copyright (C) 2014 the go-capnproto authors and contributors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +=============================================================================== diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/README.md b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/README.md new file mode 100644 index 00000000..47d072b3 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/README.md @@ -0,0 +1,68 @@ +# Cap'n Proto bindings for Go + +[![GoDoc](https://godoc.org/zombiezen.com/go/capnproto2?status.svg)][godoc] +[![Build Status](https://travis-ci.org/capnproto/go-capnproto2.svg?branch=master)][travis] + +go-capnproto consists of: +- a Go code generator for [Cap'n Proto](https://capnproto.org/) +- a Go package that provides runtime support +- a Go package that implements Level 1 of the RPC protocol + +[godoc]: https://godoc.org/zombiezen.com/go/capnproto2 +[travis]: https://travis-ci.org/capnproto/go-capnproto2 + +## Getting started + +You will need the `capnp` tool to compile schemas into Go. +This package has been tested with Cap'n Proto 0.5.0. + +``` +$ go get -u -t zombiezen.com/go/capnproto2/... +$ go test -v zombiezen.com/go/capnproto2/... +``` + +This library uses [SemVer tags][] to indicate stable releases. +While the goal is that master should always be passing all known tests, tagged releases are vetted more. +When possible, use the [latest release tag](https://github.com/capnproto/go-capnproto2/releases). + +``` +$ cd $GOPATH/src/zombiezen.com/go/capnproto2 +$ git fetch +$ git checkout v2.16.0 # check the releases page for the latest +``` + +Then read the [Getting Started guide][]. + +[SemVer tags]: http://semver.org/ +[Getting Started guide]: https://github.com/capnproto/go-capnproto2/wiki/Getting-Started + +## API Compatibility + +Consider this package's API as beta software, since the Cap'n Proto spec is not final. +In the spirit of the [Go 1 compatibility guarantee][gocompat], I will make every effort to avoid making breaking API changes. +The major cases where I reserve the right to make breaking changes are: + +- Security. +- Changes in the Cap'n Proto specification. +- Bugs. + +The `pogs` package is relatively new and may change over time. +However, its functionality has been well-tested and will probably only relax restrictions. + +[gocompat]: https://golang.org/doc/go1compat + +## Documentation + +See the docs on [godoc.org][godoc]. + +## What is Cap'n Proto? + +The best cerealization... 
+ +https://capnproto.org/ + +## License + +MIT - see [LICENSE][] file + +[LICENSE]: https://github.com/capnproto/go-capnproto2/blob/master/LICENSE diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/WORKSPACE b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/WORKSPACE new file mode 100644 index 00000000..9154090a --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/WORKSPACE @@ -0,0 +1,31 @@ +workspace(name = "com_zombiezen_go_capnproto2") + +git_repository( + name = "io_bazel_rules_go", + remote = "https://github.com/bazelbuild/rules_go.git", + commit = "43a3bda3eb97e7bcd86f564a1e0a4b008d6c407c", +) + +load("@io_bazel_rules_go//go:def.bzl", "go_repositories", "go_repository") + +go_repositories() + +go_repository( + name = "com_github_kylelemons_godebug", + importpath = "github.com/kylelemons/godebug", + sha256 = "4415b09bae90e41695bc17e4d00d0708e1f6bbb6e21cc22ce0146a26ddc243a7", + strip_prefix = "godebug-a616ab194758ae0a11290d87ca46ee8c440117b0", + urls = [ + "https://github.com/kylelemons/godebug/archive/a616ab194758ae0a11290d87ca46ee8c440117b0.zip", + ], +) + +go_repository( + name = "org_golang_x_net", + importpath = "golang.org/x/net", + sha256 = "880dc04d0af397dce6875ee2349bbb4295fe5a47352f7a4da4270456f726edd4", + strip_prefix = "net-f5079bd7f6f74e23c4d65efa0f4ce14cbd6a3c0f", + urls = [ + "https://github.com/golang/net/archive/f5079bd7f6f74e23c4d65efa0f4ce14cbd6a3c0f.zip", + ], +) diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/address.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/address.go new file mode 100644 index 00000000..0e06dc49 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/address.go @@ -0,0 +1,116 @@ +package capnp + +// An Address is an index inside a segment's data (in bytes). +type Address uint32 + +// addSize returns the address a+sz. +func (a Address) addSize(sz Size) (b Address, ok bool) { + x := int64(a) + int64(sz) + if x > int64(maxSize) { + return 0, false + } + return Address(x), true +} + +// element returns the address a+i*sz. +func (a Address) element(i int32, sz Size) (b Address, ok bool) { + x := int64(i) * int64(sz) + if x > int64(maxSize) { + return 0, false + } + x += int64(a) + if x > int64(maxSize) { + return 0, false + } + return Address(x), true +} + +// addOffset returns the address a+o. +func (a Address) addOffset(o DataOffset) Address { + return a + Address(o) +} + +// A Size is a size (in bytes). +type Size uint32 + +// wordSize is the number of bytes in a Cap'n Proto word. +const wordSize Size = 8 + +// maxSize is the maximum representable size. +const maxSize Size = 1<<32 - 1 + +// times returns the size sz*n. +func (sz Size) times(n int32) (ns Size, ok bool) { + x := int64(sz) * int64(n) + if x > int64(maxSize) { + return 0, false + } + return Size(x), true +} + +// padToWord adds padding to sz to make it divisible by wordSize. +func (sz Size) padToWord() Size { + n := Size(wordSize - 1) + return (sz + n) &^ n +} + +// DataOffset is an offset in bytes from the beginning of a struct's data section. +type DataOffset uint32 + +// ObjectSize records section sizes for a struct or list. +type ObjectSize struct { + DataSize Size + PointerCount uint16 +} + +// isZero reports whether sz is the zero size. 
+func (sz ObjectSize) isZero() bool { + return sz.DataSize == 0 && sz.PointerCount == 0 +} + +// isOneByte reports whether the object size is one byte (for Text/Data element sizes). +func (sz ObjectSize) isOneByte() bool { + return sz.DataSize == 1 && sz.PointerCount == 0 +} + +// isValid reports whether sz's fields are in range. +func (sz ObjectSize) isValid() bool { + return sz.DataSize <= 0xffff*wordSize +} + +// pointerSize returns the number of bytes the pointer section occupies. +func (sz ObjectSize) pointerSize() Size { + // Guaranteed not to overflow + return wordSize * Size(sz.PointerCount) +} + +// totalSize returns the number of bytes that the object occupies. +func (sz ObjectSize) totalSize() Size { + return sz.DataSize + sz.pointerSize() +} + +// dataWordCount returns the number of words in the data section. +func (sz ObjectSize) dataWordCount() int32 { + if sz.DataSize%wordSize != 0 { + panic("data size not aligned by word") + } + return int32(sz.DataSize / wordSize) +} + +// totalWordCount returns the number of words that the object occupies. +func (sz ObjectSize) totalWordCount() int32 { + return sz.dataWordCount() + int32(sz.PointerCount) +} + +// BitOffset is an offset in bits from the beginning of a struct's data section. +type BitOffset uint32 + +// offset returns the equivalent byte offset. +func (bit BitOffset) offset() DataOffset { + return DataOffset(bit / 8) +} + +// mask returns the bitmask for the bit. +func (bit BitOffset) mask() byte { + return byte(1 << (bit % 8)) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/canonical.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/canonical.go new file mode 100644 index 00000000..40e5f2ba --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/canonical.go @@ -0,0 +1,161 @@ +package capnp + +import ( + "errors" + "fmt" +) + +// Canonicalize encodes a struct into its canonical form: a single- +// segment blob without a segment table. The result will be identical +// for equivalent structs, even as the schema evolves. The blob is +// suitable for hashing or signing. 
+func Canonicalize(s Struct) ([]byte, error) { + msg, seg, _ := NewMessage(SingleSegment(nil)) + if !s.IsValid() { + return seg.Data(), nil + } + root, err := NewRootStruct(seg, canonicalStructSize(s)) + if err != nil { + return nil, fmt.Errorf("canonicalize: %v", err) + } + if err := msg.SetRootPtr(root.ToPtr()); err != nil { + return nil, fmt.Errorf("canonicalize: %v", err) + } + if err := fillCanonicalStruct(root, s); err != nil { + return nil, fmt.Errorf("canonicalize: %v", err) + } + return seg.Data(), nil +} + +func canonicalPtr(dst *Segment, p Ptr) (Ptr, error) { + if !p.IsValid() { + return Ptr{}, nil + } + switch p.flags.ptrType() { + case structPtrType: + ss, err := NewStruct(dst, canonicalStructSize(p.Struct())) + if err != nil { + return Ptr{}, err + } + if err := fillCanonicalStruct(ss, p.Struct()); err != nil { + return Ptr{}, err + } + return ss.ToPtr(), nil + case listPtrType: + ll, err := canonicalList(dst, p.List()) + if err != nil { + return Ptr{}, err + } + return ll.ToPtr(), nil + case interfacePtrType: + return Ptr{}, errors.New("cannot canonicalize interface") + default: + panic("unreachable") + } +} + +func fillCanonicalStruct(dst, s Struct) error { + copy(dst.seg.slice(dst.off, dst.size.DataSize), s.seg.slice(s.off, s.size.DataSize)) + for i := uint16(0); i < dst.size.PointerCount; i++ { + p, err := s.Ptr(i) + if err != nil { + return fmt.Errorf("pointer %d: %v", i, err) + } + cp, err := canonicalPtr(dst.seg, p) + if err != nil { + return fmt.Errorf("pointer %d: %v", i, err) + } + if err := dst.SetPtr(i, cp); err != nil { + return fmt.Errorf("pointer %d: %v", i, err) + } + } + return nil +} + +func canonicalStructSize(s Struct) ObjectSize { + if !s.IsValid() { + return ObjectSize{} + } + var sz ObjectSize + // int32 will not overflow because max struct data size is 2^16 words. + for off := int32(s.size.DataSize &^ (wordSize - 1)); off >= 0; off -= int32(wordSize) { + if s.Uint64(DataOffset(off)) != 0 { + sz.DataSize = Size(off) + wordSize + break + } + } + for i := int32(s.size.PointerCount) - 1; i >= 0; i-- { + if s.seg.readRawPointer(s.pointerAddress(uint16(i))) != 0 { + sz.PointerCount = uint16(i + 1) + break + } + } + return sz +} + +func canonicalList(dst *Segment, l List) (List, error) { + if !l.IsValid() { + return List{}, nil + } + if l.size.PointerCount == 0 { + // Data only, just copy over. 
+ sz := l.allocSize() + _, newAddr, err := alloc(dst, sz) + if err != nil { + return List{}, err + } + cl := List{ + seg: dst, + off: newAddr, + length: l.length, + size: l.size, + flags: l.flags, + depthLimit: maxDepth, + } + end, _ := l.off.addSize(sz) // list was already validated + copy(dst.data[newAddr:], l.seg.data[l.off:end]) + return cl, nil + } + if l.flags&isCompositeList == 0 { + cl, err := NewPointerList(dst, l.length) + if err != nil { + return List{}, err + } + for i := 0; i < l.Len(); i++ { + p, err := PointerList{l}.PtrAt(i) + if err != nil { + return List{}, fmt.Errorf("element %d: %v", i, err) + } + cp, err := canonicalPtr(dst, p) + if err != nil { + return List{}, fmt.Errorf("element %d: %v", i, err) + } + if err := cl.SetPtr(i, cp); err != nil { + return List{}, fmt.Errorf("element %d: %v", i, err) + } + } + return cl.List, nil + } + + // Struct/composite list + var elemSize ObjectSize + for i := 0; i < l.Len(); i++ { + sz := canonicalStructSize(l.Struct(i)) + if sz.DataSize > elemSize.DataSize { + elemSize.DataSize = sz.DataSize + } + if sz.PointerCount > elemSize.PointerCount { + elemSize.PointerCount = sz.PointerCount + } + } + cl, err := NewCompositeList(dst, elemSize, l.length) + if err != nil { + return List{}, err + } + for i := 0; i < cl.Len(); i++ { + if err := fillCanonicalStruct(cl.Struct(i), l.Struct(i)); err != nil { + return List{}, fmt.Errorf("element %d: %v", i, err) + } + } + return cl, nil +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/capability.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/capability.go new file mode 100644 index 00000000..d11c680d --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/capability.go @@ -0,0 +1,541 @@ +package capnp + +import ( + "errors" + "strconv" + + "golang.org/x/net/context" +) + +// An Interface is a reference to a client in a message's capability table. +type Interface struct { + seg *Segment + cap CapabilityID +} + +// NewInterface creates a new interface pointer. No allocation is +// performed; s is only used for Segment()'s return value. +func NewInterface(s *Segment, cap CapabilityID) Interface { + return Interface{ + seg: s, + cap: cap, + } +} + +// ToInterface converts p to an Interface. +// +// Deprecated: Use Ptr.Interface. +func ToInterface(p Pointer) Interface { + if !IsValid(p) { + return Interface{} + } + i, ok := p.underlying().(Interface) + if !ok { + return Interface{} + } + return i +} + +// ToPtr converts the interface to a generic pointer. +func (p Interface) ToPtr() Ptr { + return Ptr{ + seg: p.seg, + lenOrCap: uint32(p.cap), + flags: interfacePtrFlag, + } +} + +// Segment returns the segment this pointer came from. +func (i Interface) Segment() *Segment { + return i.seg +} + +// IsValid returns whether the interface is valid. +func (i Interface) IsValid() bool { + return i.seg != nil +} + +// HasData is always true. +func (i Interface) HasData() bool { + return true +} + +// Capability returns the capability ID of the interface. +func (i Interface) Capability() CapabilityID { + return i.cap +} + +// value returns a raw interface pointer with the capability ID. 
+func (i Interface) value(paddr Address) rawPointer { + if i.seg == nil { + return 0 + } + return rawInterfacePointer(i.cap) +} + +func (i Interface) underlying() Pointer { + return i +} + +// Client returns the client stored in the message's capability table +// or nil if the pointer is invalid. +func (i Interface) Client() Client { + if i.seg == nil { + return nil + } + tab := i.seg.msg.CapTable + if int64(i.cap) >= int64(len(tab)) { + return nil + } + return tab[i.cap] +} + +// ErrNullClient is returned from a call made on a null client pointer. +var ErrNullClient = errors.New("capnp: call on null client") + +// A CapabilityID is an index into a message's capability table. +type CapabilityID uint32 + +// A Client represents an Cap'n Proto interface type. It is safe to use +// from multiple goroutines. +// +// Generally, only RPC protocol implementers should provide types that +// implement Client: call ordering guarantees, promises, and +// synchronization are tricky to get right. Prefer creating a server +// that wraps another interface than trying to implement Client. +type Client interface { + // Call starts executing a method and returns an answer that will hold + // the resulting struct. The call's parameters must be placed before + // Call() returns. + // + // Calls are delivered to the capability in the order they are made. + // This guarantee is based on the concept of a capability + // acknowledging delivery of a call: this is specific to an + // implementation of Client. A type that implements Client must + // guarantee that if foo() then bar() is called on a client, that + // acknowledging foo() happens before acknowledging bar(). + Call(call *Call) Answer + + // Close releases any resources associated with this client. + // No further calls to the client should be made after calling Close. + Close() error +} + +// The Call type holds the record for an outgoing interface call. +type Call struct { + // Ctx is the context of the call. + Ctx context.Context + + // Method is the interface ID and method ID, along with the optional name, + // of the method to call. + Method Method + + // Params is a struct containing parameters for the call. + // This should be set when the RPC system receives a call for an + // exported interface. It is mutually exclusive with ParamsFunc + // and ParamsSize. + Params Struct + // ParamsFunc is a function that populates an allocated struct with + // the parameters for the call. ParamsSize determines the size of the + // struct to allocate. This is used when application code is using a + // client. These settings should be set together; they are mutually + // exclusive with Params. + ParamsFunc func(Struct) error + ParamsSize ObjectSize + + // Options passes RPC-specific options for the call. + Options CallOptions +} + +// Copy clones a call, ensuring that its Params are placed. +// If Call.ParamsFunc is nil, then the same Call will be returned. +func (call *Call) Copy(s *Segment) (*Call, error) { + if call.ParamsFunc == nil { + return call, nil + } + p, err := call.PlaceParams(s) + if err != nil { + return nil, err + } + return &Call{ + Ctx: call.Ctx, + Method: call.Method, + Params: p, + Options: call.Options, + }, nil +} + +// PlaceParams returns the parameters struct, allocating it inside +// segment s as necessary. If s is nil, a new single-segment message +// is allocated. 
+func (call *Call) PlaceParams(s *Segment) (Struct, error) { + if call.ParamsFunc == nil { + return call.Params, nil + } + if s == nil { + var err error + _, s, err = NewMessage(SingleSegment(nil)) + if err != nil { + return Struct{}, err + } + } + p, err := NewStruct(s, call.ParamsSize) + if err != nil { + return Struct{}, nil + } + err = call.ParamsFunc(p) + return p, err +} + +// CallOptions holds RPC-specific options for an interface call. +// Its usage is similar to the values in context.Context, but is only +// used for a single call: its values are not intended to propagate to +// other callees. An example of an option would be the +// Call.sendResultsTo field in rpc.capnp. +type CallOptions struct { + m map[interface{}]interface{} +} + +// NewCallOptions builds a CallOptions value from a list of individual options. +func NewCallOptions(opts []CallOption) CallOptions { + co := CallOptions{make(map[interface{}]interface{})} + for _, o := range opts { + o.f(co) + } + return co +} + +// Value retrieves the value associated with the options for this key, +// or nil if no value is associated with this key. +func (co CallOptions) Value(key interface{}) interface{} { + return co.m[key] +} + +// With creates a copy of the CallOptions value with other options applied. +func (co CallOptions) With(opts []CallOption) CallOptions { + newopts := CallOptions{make(map[interface{}]interface{})} + for k, v := range co.m { + newopts.m[k] = v + } + for _, o := range opts { + o.f(newopts) + } + return newopts +} + +// A CallOption is a function that modifies options on an interface call. +type CallOption struct { + f func(CallOptions) +} + +// SetOptionValue returns a call option that associates a value to an +// option key. This can be retrieved later with CallOptions.Value. +func SetOptionValue(key, value interface{}) CallOption { + return CallOption{func(co CallOptions) { + co.m[key] = value + }} +} + +// An Answer is the deferred result of a client call, which is usually wrapped by a Pipeline. +type Answer interface { + // Struct waits until the call is finished and returns the result. + Struct() (Struct, error) + + // The following methods are the same as in Client except with + // an added transform parameter -- a path to the interface to use. + + PipelineCall(transform []PipelineOp, call *Call) Answer + PipelineClose(transform []PipelineOp) error +} + +// A Pipeline is a generic wrapper for an answer. +type Pipeline struct { + answer Answer + parent *Pipeline + op PipelineOp +} + +// NewPipeline returns a new pipeline based on an answer. +func NewPipeline(ans Answer) *Pipeline { + return &Pipeline{answer: ans} +} + +// Answer returns the answer the pipeline is derived from. +func (p *Pipeline) Answer() Answer { + return p.answer +} + +// Transform returns the operations needed to transform the root answer +// into the value p represents. +func (p *Pipeline) Transform() []PipelineOp { + n := 0 + for q := p; q.parent != nil; q = q.parent { + n++ + } + xform := make([]PipelineOp, n) + for i, q := n-1, p; q.parent != nil; i, q = i-1, q.parent { + xform[i] = q.op + } + return xform +} + +// Struct waits until the answer is resolved and returns the struct +// this pipeline represents. +func (p *Pipeline) Struct() (Struct, error) { + s, err := p.answer.Struct() + if err != nil { + return Struct{}, err + } + ptr, err := TransformPtr(s.ToPtr(), p.Transform()) + if err != nil { + return Struct{}, err + } + return ptr.Struct(), nil +} + +// Client returns the client version of p. 
+func (p *Pipeline) Client() *PipelineClient { + return (*PipelineClient)(p) +} + +// GetPipeline returns a derived pipeline which yields the pointer field given. +func (p *Pipeline) GetPipeline(off uint16) *Pipeline { + return p.GetPipelineDefault(off, nil) +} + +// GetPipelineDefault returns a derived pipeline which yields the pointer field given, +// defaulting to the value given. +func (p *Pipeline) GetPipelineDefault(off uint16, def []byte) *Pipeline { + return &Pipeline{ + answer: p.answer, + parent: p, + op: PipelineOp{ + Field: off, + DefaultValue: def, + }, + } +} + +// PipelineClient implements Client by calling to the pipeline's answer. +type PipelineClient Pipeline + +func (pc *PipelineClient) transform() []PipelineOp { + return (*Pipeline)(pc).Transform() +} + +// Call calls Answer.PipelineCall with the pipeline's transform. +func (pc *PipelineClient) Call(call *Call) Answer { + return pc.answer.PipelineCall(pc.transform(), call) +} + +// Close calls Answer.PipelineClose with the pipeline's transform. +func (pc *PipelineClient) Close() error { + return pc.answer.PipelineClose(pc.transform()) +} + +// A PipelineOp describes a step in transforming a pipeline. +// It maps closely with the PromisedAnswer.Op struct in rpc.capnp. +type PipelineOp struct { + Field uint16 + DefaultValue []byte +} + +// String returns a human-readable description of op. +func (op PipelineOp) String() string { + s := make([]byte, 0, 32) + s = append(s, "get field "...) + s = strconv.AppendInt(s, int64(op.Field), 10) + if op.DefaultValue == nil { + return string(s) + } + s = append(s, " with default"...) + return string(s) +} + +// A Method identifies a method along with an optional human-readable +// description of the method. +type Method struct { + InterfaceID uint64 + MethodID uint16 + + // Canonical name of the interface. May be empty. + InterfaceName string + // Method name as it appears in the schema. May be empty. + MethodName string +} + +// String returns a formatted string containing the interface name or +// the method name if present, otherwise it uses the raw IDs. +// This is suitable for use in error messages and logs. +func (m *Method) String() string { + buf := make([]byte, 0, 128) + if m.InterfaceName == "" { + buf = append(buf, '@', '0', 'x') + buf = strconv.AppendUint(buf, m.InterfaceID, 16) + } else { + buf = append(buf, m.InterfaceName...) + } + buf = append(buf, '.') + if m.MethodName == "" { + buf = append(buf, '@') + buf = strconv.AppendUint(buf, uint64(m.MethodID), 10) + } else { + buf = append(buf, m.MethodName...) + } + return string(buf) +} + +// Transform applies a sequence of pipeline operations to a pointer +// and returns the result. +// +// Deprecated: Use TransformPtr. +func Transform(p Pointer, transform []PipelineOp) (Pointer, error) { + pp, err := TransformPtr(toPtr(p), transform) + return pp.toPointer(), err +} + +// TransformPtr applies a sequence of pipeline operations to a pointer +// and returns the result. 
+func TransformPtr(p Ptr, transform []PipelineOp) (Ptr, error) { + n := len(transform) + if n == 0 { + return p, nil + } + s := p.Struct() + for _, op := range transform[:n-1] { + field, err := s.Ptr(op.Field) + if err != nil { + return Ptr{}, err + } + s, err = field.StructDefault(op.DefaultValue) + if err != nil { + return Ptr{}, err + } + } + op := transform[n-1] + p, err := s.Ptr(op.Field) + if err != nil { + return Ptr{}, err + } + if op.DefaultValue != nil { + p, err = p.Default(op.DefaultValue) + } + return p, err +} + +type immediateAnswer struct { + s Struct +} + +// ImmediateAnswer returns an Answer that accesses s. +func ImmediateAnswer(s Struct) Answer { + return immediateAnswer{s} +} + +func (ans immediateAnswer) Struct() (Struct, error) { + return ans.s, nil +} + +func (ans immediateAnswer) findClient(transform []PipelineOp) Client { + p, err := TransformPtr(ans.s.ToPtr(), transform) + if err != nil { + return ErrorClient(err) + } + return p.Interface().Client() +} + +func (ans immediateAnswer) PipelineCall(transform []PipelineOp, call *Call) Answer { + c := ans.findClient(transform) + if c == nil { + return ErrorAnswer(ErrNullClient) + } + return c.Call(call) +} + +func (ans immediateAnswer) PipelineClose(transform []PipelineOp) error { + c := ans.findClient(transform) + if c == nil { + return ErrNullClient + } + return c.Close() +} + +type errorAnswer struct { + e error +} + +// ErrorAnswer returns a Answer that always returns error e. +func ErrorAnswer(e error) Answer { + return errorAnswer{e} +} + +func (ans errorAnswer) Struct() (Struct, error) { + return Struct{}, ans.e +} + +func (ans errorAnswer) PipelineCall([]PipelineOp, *Call) Answer { + return ans +} + +func (ans errorAnswer) PipelineClose([]PipelineOp) error { + return ans.e +} + +// IsFixedAnswer reports whether an answer was created by +// ImmediateAnswer or ErrorAnswer. +func IsFixedAnswer(ans Answer) bool { + switch ans.(type) { + case immediateAnswer: + return true + case errorAnswer: + return true + default: + return false + } +} + +type errorClient struct { + e error +} + +// ErrorClient returns a Client that always returns error e. +func ErrorClient(e error) Client { + return errorClient{e} +} + +func (ec errorClient) Call(*Call) Answer { + return ErrorAnswer(ec.e) +} + +func (ec errorClient) Close() error { + return nil +} + +// IsErrorClient reports whether c was created with ErrorClient. +func IsErrorClient(c Client) bool { + _, ok := c.(errorClient) + return ok +} + +// MethodError is an error on an associated method. +type MethodError struct { + Method *Method + Err error +} + +// Error returns the method name concatenated with the error string. +func (e *MethodError) Error() string { + return e.Method.String() + ": " + e.Err.Error() +} + +// ErrUnimplemented is the error returned when a method is called on +// a server that does not implement the method. +var ErrUnimplemented = errors.New("capnp: method not implemented") + +// IsUnimplemented reports whether e indicates an unimplemented method error. 
+func IsUnimplemented(e error) bool { + if me, ok := e.(*MethodError); ok { + e = me.Err + } + return e == ErrUnimplemented +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/capn.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/capn.go new file mode 100644 index 00000000..6de4c836 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/capn.go @@ -0,0 +1,427 @@ +package capnp + +import ( + "encoding/binary" + "errors" +) + +// A SegmentID is a numeric identifier for a Segment. +type SegmentID uint32 + +// A Segment is an allocation arena for Cap'n Proto objects. +// It is part of a Message, which can contain other segments that +// reference each other. +type Segment struct { + msg *Message + id SegmentID + data []byte +} + +// Message returns the message that contains s. +func (s *Segment) Message() *Message { + return s.msg +} + +// ID returns the segment's ID. +func (s *Segment) ID() SegmentID { + return s.id +} + +// Data returns the raw byte slice for the segment. +func (s *Segment) Data() []byte { + return s.data +} + +func (s *Segment) inBounds(addr Address) bool { + return addr < Address(len(s.data)) +} + +func (s *Segment) regionInBounds(base Address, sz Size) bool { + end, ok := base.addSize(sz) + if !ok { + return false + } + return end <= Address(len(s.data)) +} + +// slice returns the segment of data from base to base+sz. +func (s *Segment) slice(base Address, sz Size) []byte { + // Bounds check should have happened before calling slice. + return s.data[base : base+Address(sz)] +} + +func (s *Segment) readUint8(addr Address) uint8 { + return s.slice(addr, 1)[0] +} + +func (s *Segment) readUint16(addr Address) uint16 { + return binary.LittleEndian.Uint16(s.slice(addr, 2)) +} + +func (s *Segment) readUint32(addr Address) uint32 { + return binary.LittleEndian.Uint32(s.slice(addr, 4)) +} + +func (s *Segment) readUint64(addr Address) uint64 { + return binary.LittleEndian.Uint64(s.slice(addr, 8)) +} + +func (s *Segment) readRawPointer(addr Address) rawPointer { + return rawPointer(s.readUint64(addr)) +} + +func (s *Segment) writeUint8(addr Address, val uint8) { + s.slice(addr, 1)[0] = val +} + +func (s *Segment) writeUint16(addr Address, val uint16) { + binary.LittleEndian.PutUint16(s.slice(addr, 2), val) +} + +func (s *Segment) writeUint32(addr Address, val uint32) { + binary.LittleEndian.PutUint32(s.slice(addr, 4), val) +} + +func (s *Segment) writeUint64(addr Address, val uint64) { + binary.LittleEndian.PutUint64(s.slice(addr, 8), val) +} + +func (s *Segment) writeRawPointer(addr Address, val rawPointer) { + s.writeUint64(addr, uint64(val)) +} + +// root returns a 1-element pointer list that references the first word +// in the segment. This only makes sense to call on the first segment +// in a message. 
+func (s *Segment) root() PointerList { + sz := ObjectSize{PointerCount: 1} + if !s.regionInBounds(0, sz.totalSize()) { + return PointerList{} + } + return PointerList{List{ + seg: s, + length: 1, + size: sz, + depthLimit: s.msg.depthLimit(), + }} +} + +func (s *Segment) lookupSegment(id SegmentID) (*Segment, error) { + if s.id == id { + return s, nil + } + return s.msg.Segment(id) +} + +func (s *Segment) readPtr(paddr Address, depthLimit uint) (ptr Ptr, err error) { + s, base, val, err := s.resolveFarPointer(paddr) + if err != nil { + return Ptr{}, err + } + if val == 0 { + return Ptr{}, nil + } + if depthLimit == 0 { + return Ptr{}, errDepthLimit + } + switch val.pointerType() { + case structPointer: + sp, err := s.readStructPtr(base, val) + if err != nil { + return Ptr{}, err + } + if !s.msg.ReadLimiter().canRead(sp.readSize()) { + return Ptr{}, errReadLimit + } + sp.depthLimit = depthLimit - 1 + return sp.ToPtr(), nil + case listPointer: + lp, err := s.readListPtr(base, val) + if err != nil { + return Ptr{}, err + } + if !s.msg.ReadLimiter().canRead(lp.readSize()) { + return Ptr{}, errReadLimit + } + lp.depthLimit = depthLimit - 1 + return lp.ToPtr(), nil + case otherPointer: + if val.otherPointerType() != 0 { + return Ptr{}, errOtherPointer + } + return Interface{ + seg: s, + cap: val.capabilityIndex(), + }.ToPtr(), nil + default: + // Only other types are far pointers. + return Ptr{}, errBadLandingPad + } +} + +func (s *Segment) readStructPtr(base Address, val rawPointer) (Struct, error) { + addr, ok := val.offset().resolve(base) + if !ok { + return Struct{}, errPointerAddress + } + sz := val.structSize() + if !s.regionInBounds(addr, sz.totalSize()) { + return Struct{}, errPointerAddress + } + return Struct{ + seg: s, + off: addr, + size: sz, + }, nil +} + +func (s *Segment) readListPtr(base Address, val rawPointer) (List, error) { + addr, ok := val.offset().resolve(base) + if !ok { + return List{}, errPointerAddress + } + lsize, ok := val.totalListSize() + if !ok { + return List{}, errOverflow + } + if !s.regionInBounds(addr, lsize) { + return List{}, errPointerAddress + } + lt := val.listType() + if lt == compositeList { + hdr := s.readRawPointer(addr) + var ok bool + addr, ok = addr.addSize(wordSize) + if !ok { + return List{}, errOverflow + } + if hdr.pointerType() != structPointer { + return List{}, errBadTag + } + sz := hdr.structSize() + n := int32(hdr.offset()) + // TODO(light): check that this has the same end address + if tsize, ok := sz.totalSize().times(n); !ok { + return List{}, errOverflow + } else if !s.regionInBounds(addr, tsize) { + return List{}, errPointerAddress + } + return List{ + seg: s, + size: sz, + off: addr, + length: n, + flags: isCompositeList, + }, nil + } + if lt == bit1List { + return List{ + seg: s, + off: addr, + length: val.numListElements(), + flags: isBitList, + }, nil + } + return List{ + seg: s, + size: val.elementSize(), + off: addr, + length: val.numListElements(), + }, nil +} + +func (s *Segment) resolveFarPointer(paddr Address) (dst *Segment, base Address, resolved rawPointer, err error) { + // Encoding details at https://capnproto.org/encoding.html#inter-segment-pointers + + val := s.readRawPointer(paddr) + switch val.pointerType() { + case doubleFarPointer: + padSeg, err := s.lookupSegment(val.farSegment()) + if err != nil { + return nil, 0, 0, err + } + padAddr := val.farAddress() + if !padSeg.regionInBounds(padAddr, wordSize*2) { + return nil, 0, 0, errPointerAddress + } + far := padSeg.readRawPointer(padAddr) + if far.pointerType() != 
farPointer { + return nil, 0, 0, errBadLandingPad + } + tagAddr, ok := padAddr.addSize(wordSize) + if !ok { + return nil, 0, 0, errOverflow + } + tag := padSeg.readRawPointer(tagAddr) + if pt := tag.pointerType(); (pt != structPointer && pt != listPointer) || tag.offset() != 0 { + return nil, 0, 0, errBadLandingPad + } + if dst, err = s.lookupSegment(far.farSegment()); err != nil { + return nil, 0, 0, err + } + return dst, 0, landingPadNearPointer(far, tag), nil + case farPointer: + var err error + dst, err = s.lookupSegment(val.farSegment()) + if err != nil { + return nil, 0, 0, err + } + padAddr := val.farAddress() + if !dst.regionInBounds(padAddr, wordSize) { + return nil, 0, 0, errPointerAddress + } + var ok bool + base, ok = padAddr.addSize(wordSize) + if !ok { + return nil, 0, 0, errOverflow + } + return dst, base, dst.readRawPointer(padAddr), nil + default: + var ok bool + base, ok = paddr.addSize(wordSize) + if !ok { + return nil, 0, 0, errOverflow + } + return s, base, val, nil + } +} + +func (s *Segment) writePtr(off Address, src Ptr, forceCopy bool) error { + if !src.IsValid() { + s.writeRawPointer(off, 0) + return nil + } + + // Copy src, if needed, and process pointers where placement is + // irrelevant (capabilities and zero-sized structs). + var srcAddr Address + var srcRaw rawPointer + switch src.flags.ptrType() { + case structPtrType: + st := src.Struct() + if st.size.isZero() { + // Zero-sized structs should always be encoded with offset -1 in + // order to avoid conflating with null. No allocation needed. + s.writeRawPointer(off, rawStructPointer(-1, ObjectSize{})) + return nil + } + if forceCopy || src.seg.msg != s.msg || st.flags&isListMember != 0 { + newSeg, newAddr, err := alloc(s, st.size.totalSize()) + if err != nil { + return err + } + dst := Struct{ + seg: newSeg, + off: newAddr, + size: st.size, + depthLimit: maxDepth, + // clear flags + } + if err := copyStruct(dst, st); err != nil { + return err + } + st = dst + src = dst.ToPtr() + } + srcAddr = st.off + srcRaw = rawStructPointer(0, st.size) + case listPtrType: + l := src.List() + if forceCopy || src.seg.msg != s.msg { + sz := l.allocSize() + newSeg, newAddr, err := alloc(s, sz) + if err != nil { + return err + } + dst := List{ + seg: newSeg, + off: newAddr, + length: l.length, + size: l.size, + flags: l.flags, + depthLimit: maxDepth, + } + if dst.flags&isCompositeList != 0 { + // Copy tag word + newSeg.writeRawPointer(newAddr, l.seg.readRawPointer(l.off-Address(wordSize))) + var ok bool + dst.off, ok = dst.off.addSize(wordSize) + if !ok { + return errOverflow + } + sz -= wordSize + } + if dst.flags&isBitList != 0 || dst.size.PointerCount == 0 { + end, _ := l.off.addSize(sz) // list was already validated + copy(newSeg.data[dst.off:], l.seg.data[l.off:end]) + } else { + for i := 0; i < l.Len(); i++ { + err := copyStruct(dst.Struct(i), l.Struct(i)) + if err != nil { + return err + } + } + } + l = dst + src = dst.ToPtr() + } + srcAddr = l.off + if l.flags&isCompositeList != 0 { + srcAddr -= Address(wordSize) + } + srcRaw = l.raw() + case interfacePtrType: + i := src.Interface() + if src.seg.msg != s.msg { + c := s.msg.AddCap(i.Client()) + i = NewInterface(s, c) + } + s.writeRawPointer(off, i.value(off)) + return nil + default: + panic("unreachable") + } + + switch { + case src.seg == s: + // Common case: src is in same segment as pointer. + // Use a near pointer. 
+ s.writeRawPointer(off, srcRaw.withOffset(nearPointerOffset(off, srcAddr))) + return nil + case hasCapacity(src.seg.data, wordSize): + // Enough room adjacent to src to write a far pointer landing pad. + _, padAddr, _ := alloc(src.seg, wordSize) + src.seg.writeRawPointer(padAddr, srcRaw.withOffset(nearPointerOffset(padAddr, srcAddr))) + s.writeRawPointer(off, rawFarPointer(src.seg.id, padAddr)) + return nil + default: + // Not enough room for a landing pad, need to use a double-far pointer. + padSeg, padAddr, err := alloc(s, wordSize*2) + if err != nil { + return err + } + padSeg.writeRawPointer(padAddr, rawFarPointer(src.seg.id, srcAddr)) + padSeg.writeRawPointer(padAddr+Address(wordSize), srcRaw) + s.writeRawPointer(off, rawDoubleFarPointer(padSeg.id, padAddr)) + return nil + } +} + +var ( + errPointerAddress = errors.New("capnp: invalid pointer address") + errBadLandingPad = errors.New("capnp: invalid far pointer landing pad") + errBadTag = errors.New("capnp: invalid tag word") + errOtherPointer = errors.New("capnp: unknown pointer type") + errObjectSize = errors.New("capnp: invalid object size") + errElementSize = errors.New("capnp: mismatched list element size") + errReadLimit = errors.New("capnp: read traversal limit reached") + errDepthLimit = errors.New("capnp: depth limit reached") +) + +var ( + errOverflow = errors.New("capnp: address or size overflow") + errOutOfBounds = errors.New("capnp: address out of bounds") + errCopyDepth = errors.New("capnp: copy depth too large") + errOverlap = errors.New("capnp: overlapping data on copy") + errListSize = errors.New("capnp: invalid list size") +) diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/doc.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/doc.go new file mode 100644 index 00000000..ef7f6497 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/doc.go @@ -0,0 +1,384 @@ +/* +Package capnp is a Cap'n Proto library for Go. +https://capnproto.org/ + +Read the Getting Started guide for a tutorial on how to use this +package. https://github.com/capnproto/go-capnproto2/wiki/Getting-Started + +Generating code + +capnpc-go provides the compiler backend for capnp. + + # First, install capnpc-go to $PATH. + go install zombiezen.com/go/capnproto2/capnpc-go + # Then, generate Go files. + capnp compile -I$GOPATH/src/zombiezen.com/go/capnproto2/std -ogo *.capnp + +capnpc-go requires two annotations for all files: package and import. +package is needed to know what package to place at the head of the +generated file and what identifier to use when referring to the type +from another package. import should be the fully qualified import path +and is used to generate import statement from other packages and to +detect when two types are in the same package. For example: + + using Go = import "/go.capnp"; + $Go.package("main"); + $Go.import("zombiezen.com/go/capnproto2/example"); + +For adding documentation comments to the generated code, there's the doc +annotation. This annotation adds the comment to a struct, enum or field so +that godoc will pick it up. For example: + + struct Zdate $Go.doc("Zdate represents a calendar date") { + year @0 :Int16; + month @1 :UInt8; + day @2 :UInt8 ; + } + +Messages and Segments + +In Cap'n Proto, the unit of communication is a message. A message +consists of one or more segments -- contiguous blocks of memory. 
This +allows large messages to be split up and loaded independently or lazily. +Typically you will use one segment per message. Logically, a message is +organized in a tree of objects, with the root always being a struct (as +opposed to a list or primitive). Messages can be read from and written +to a stream. + +The Message and Segment types are the main types that application code +will use from this package. The Message type has methods for marshaling +and unmarshaling its segments to the wire format. If the application +needs to read or write from a stream, it should use the Encoder and +Decoder types. + +Pointers + +The type for a generic reference to a Cap'n Proto object is Ptr. A Ptr +can refer to a struct, a list, or an interface. Ptr, Struct, List, and +Interface (the pointer types) have value semantics and refer to data in +a single segment. All of the pointer types have a notion of "valid". +An invalid pointer will return the default value from any accessor and +panic when any setter is called. + +In previous versions of this package, the Pointer interface was used +instead of the Ptr struct. This interface and functions that use it are +now deprecated. See https://github.com/capnproto/go-capnproto2/wiki/New-Ptr-Type +for details about this API change. + +Data accessors and setters (i.e. struct primitive fields and list +elements) do not return errors, but pointer accessors and setters do. +There are a few reasons that a read or write of a pointer can fail, but +the most common are bad pointers or allocation failures. For accessors, +an invalid object will be returned in case of an error. + +Since Go doesn't have generics, wrapper types provide type safety on +lists. This package provides lists of basic types, and capnpc-go +generates list wrappers for named types. However, if you need to use +deeper nesting of lists (e.g. List(List(UInt8))), you will need to use a +PointerList and wrap the elements. + +Structs + +For the following schema: + +struct Foo @0x8423424e9b01c0af { + num @0 :UInt32; + bar @1 :Foo; +} + +capnpc-go will generate: + + // Foo is a pointer to a Foo struct in a segment. + // Member functions are provided to get/set members in the + // struct. + type Foo struct{ capnp.Struct } + + // Foo_TypeID is the unique identifier for the type Foo. + // It remains the same across languages and schema changes. + const Foo_TypeID = 0x8423424e9b01c0af + + // NewFoo creates a new orphaned Foo struct, preferring placement in + // s. If there isn't enough space, then another segment in the + // message will be used or allocated. You can set a field of type Foo + // to this new message, but usually you will want to use the + // NewBar()-style method shown below. + func NewFoo(s *capnp.Segment) (Foo, error) + + // NewRootFoo creates a new Foo struct and sets the message's root to + // it. + func NewRootFoo(s *capnp.Segment) (Foo, error) + + // ReadRootFoo reads the message's root pointer and converts it to a + // Foo struct. + func ReadRootFoo(msg *capnp.Message) (Foo, error) + + // Num returns the value of the num field. + func (s Foo) Num() uint32 + + // SetNum sets the value of the num field to v. + func (s Foo) SetNum(v uint32) + + // Bar returns the value of the bar field. This can return an error + // if the pointer goes beyond the segment's range, the segment fails + // to load, or the pointer recursion limit has been reached. + func (s Foo) Bar() (Foo, error) + + // HasBar reports whether the bar field was initialized (non-null). 
+ func (s Foo) HasBar() bool + + // SetBar sets the value of the bar field to v. + func (s Foo) SetBar(v Foo) error + + // NewBar sets the bar field to a newly allocated Foo struct, + // preferring placement in s's segment. + func (s Foo) NewBar() (Foo, error) + + // Foo_List is a value with pointer semantics. It is created for all + // structs, and is used for List(Foo) in the capnp file. + type Foo_List struct{ capnp.List } + + // NewFoo_List creates a new orphaned List(Foo), preferring placement + // in s. This can then be added to a message by using a Set function + // which takes a Foo_List. sz specifies the number of elements in the + // list. The list's size cannot be changed after creation. + func NewFoo_List(s *capnp.Segment, sz int32) Foo_List + + // Len returns the number of elements in the list. + func (s Foo_List) Len() int + + // At returns a pointer to the i'th element. If i is an invalid index, + // this will return an invalid Foo (all getters will return default + // values, setters will fail). + func (s Foo_List) At(i int) Foo + + // Foo_Promise is a promise for a Foo. Methods are provided to get + // promises of struct and interface fields. + type Foo_Promise struct{ *capnp.Pipeline } + + // Get waits until the promise is resolved and returns the result. + func (p Foo_Promise) Get() (Foo, error) + + // Bar returns a promise for that bar field. + func (p Foo_Promise) Bar() Foo_Promise + + +Groups + +For each group a typedef is created with a different method set for just the +groups fields: + + struct Foo { + group :Group { + field @0 :Bool; + } + } + +generates the following: + + type Foo struct{ capnp.Struct } + type Foo_group Foo + + func (s Foo) Group() Foo_group + func (s Foo_group) Field() bool + +That way the following may be used to access a field in a group: + + var f Foo + value := f.Group().Field() + +Note that group accessors just convert the type and so have no overhead. + +Unions + +Named unions are treated as a group with an inner unnamed union. Unnamed +unions generate an enum Type_Which and a corresponding Which() function: + + struct Foo { + union { + a @0 :Bool; + b @1 :Bool; + } + } + +generates the following: + + type Foo_Which uint16 + + const ( + Foo_Which_a Foo_Which = 0 + Foo_Which_b Foo_Which = 1 + ) + + func (s Foo) A() bool + func (s Foo) B() bool + func (s Foo) SetA(v bool) + func (s Foo) SetB(v bool) + func (s Foo) Which() Foo_Which + +Which() should be checked before using the getters, and the default case must +always be handled. + +Setters for single values will set the union discriminator as well as set the +value. + +For voids in unions, there is a void setter that just sets the discriminator. +For example: + + struct Foo { + union { + a @0 :Void; + b @1 :Void; + } + } + +generates the following: + + func (s Foo) SetA() // Set that we are using A + func (s Foo) SetB() // Set that we are using B + +Similarly, for groups in unions, there is a group setter that just sets +the discriminator. This must be called before the group getter can be +used to set values. For example: + + struct Foo { + union { + a :group { + v :Bool + } + b :group { + v :Bool + } + } + } + +and in usage: + + f.SetA() // Set that we are using group A + f.A().SetV(true) // then we can use the group A getter to set the inner values + +Enums + +capnpc-go generates enum values as constants. 
For example in the capnp file: + + enum ElementSize { + empty @0; + bit @1; + byte @2; + twoBytes @3; + fourBytes @4; + eightBytes @5; + pointer @6; + inlineComposite @7; + } + +In the generated capnp.go file: + + type ElementSize uint16 + + const ( + ElementSize_empty ElementSize = 0 + ElementSize_bit ElementSize = 1 + ElementSize_byte ElementSize = 2 + ElementSize_twoBytes ElementSize = 3 + ElementSize_fourBytes ElementSize = 4 + ElementSize_eightBytes ElementSize = 5 + ElementSize_pointer ElementSize = 6 + ElementSize_inlineComposite ElementSize = 7 + ) + +In addition an enum.String() function is generated that will convert the constants to a string +for debugging or logging purposes. By default, the enum name is used as the tag value, +but the tags can be customized with a $Go.tag or $Go.notag annotation. + +For example: + + enum ElementSize { + empty @0 $Go.tag("void"); + bit @1 $Go.tag("1 bit"); + byte @2 $Go.tag("8 bits"); + inlineComposite @7 $Go.notag; + } + +In the generated go file: + + func (c ElementSize) String() string { + switch c { + case ElementSize_empty: + return "void" + case ElementSize_bit: + return "1 bit" + case ElementSize_byte: + return "8 bits" + default: + return "" + } + } + +Interfaces + +capnpc-go generates type-safe Client wrappers for interfaces. For parameter +lists and result lists, structs are generated as described above with the names +Interface_method_Params and Interface_method_Results, unless a single struct +type is used. For example, for this interface: + + interface Calculator { + evaluate @0 (expression :Expression) -> (value :Value); + } + +capnpc-go generates the following Go code (along with the structs +Calculator_evaluate_Params and Calculator_evaluate_Results): + + // Calculator is a client to a Calculator interface. + type Calculator struct{ Client capnp.Client } + + // Evaluate calls `evaluate` on the client. params is called on a newly + // allocated Calculator_evaluate_Params struct to fill in the parameters. + func (c Calculator) Evaluate( + ctx context.Context, + params func(Calculator_evaluate_Params) error, + opts ...capnp.CallOption) *Calculator_evaluate_Results_Promise + +capnpc-go also generates code to implement the interface: + + // A Calculator_Server implements the Calculator interface. + type Calculator_Server interface { + Evaluate(Calculator_evaluate_Call) error + } + + // Calculator_evaluate_Call holds the arguments for a Calculator.evaluate server call. + type Calculator_evaluate_Call struct { + Ctx context.Context + Options capnp.CallOptions + Params Calculator_evaluate_Params + Results Calculator_evaluate_Results + } + + // Calculator_ServerToClient is equivalent to calling: + // NewCalculator(capnp.NewServer(Calculator_Methods(nil, s), s)) + // If s does not implement the Close method, then nil is used. + func Calculator_ServerToClient(s Calculator_Server) Calculator + + // Calculator_Methods appends methods from Calculator that call to server and + // returns the methods. If methods is nil or the capacity of the underlying + // slice is too small, a new slice is returned. + func Calculator_Methods(methods []server.Method, s Calculator_Server) []server.Method + +Since a single capability may want to implement many interfaces, you can +use multiple *_Methods functions to build a single slice to send to +NewServer. 
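As a rough sketch of that pattern (not taken from this package's docs), assume a second generated interface, here a hypothetical Logger with a Logger_Server interface and a generated Logger_Methods helper, implemented by the same value that implements Calculator_Server:

    // srv is assumed to satisfy both Calculator_Server and the hypothetical Logger_Server.
    methods := Calculator_Methods(nil, srv) // start a fresh slice with Calculator's methods
    methods = Logger_Methods(methods, srv)  // append Logger's methods to the same slice
    // The combined slice is then passed to the NewServer call shown in the
    // Calculator_ServerToClient equivalence above, and the resulting client can be
    // wrapped by either generated client type.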
+ +An example of combining the client/server code to communicate with a locally +implemented Calculator: + + var srv Calculator_Server + calc := Calculator_ServerToClient(srv) + result := calc.Evaluate(ctx, func(params Calculator_evaluate_Params) { + params.SetExpression(expr) + }) + val := result.Value().Get() + +A note about message ordering: when implementing a server method, you +are responsible for acknowledging delivery of a method call. Failure to +do so can cause deadlocks. See the server.Ack function for more details. +*/ +package capnp // import "zombiezen.com/go/capnproto2" diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/encoding/text/BUILD.bazel b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/encoding/text/BUILD.bazel new file mode 100644 index 00000000..ec355034 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/encoding/text/BUILD.bazel @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["marshal.go"], + visibility = ["//visibility:public"], + deps = [ + "//:go_default_library", + "//internal/nodemap:go_default_library", + "//internal/schema:go_default_library", + "//internal/strquote:go_default_library", + "//schemas:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["marshal_test.go"], + data = glob(["testdata/**"]), + library = ":go_default_library", + deps = [ + "//:go_default_library", + "//internal/schema:go_default_library", + "//schemas:go_default_library", + ], +) diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/encoding/text/marshal.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/encoding/text/marshal.go new file mode 100644 index 00000000..48cdaf57 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/encoding/text/marshal.go @@ -0,0 +1,455 @@ +// Package text supports marshaling Cap'n Proto messages as text based on a schema. +package text + +import ( + "bytes" + "fmt" + "io" + "math" + "strconv" + + "zombiezen.com/go/capnproto2" + "zombiezen.com/go/capnproto2/internal/nodemap" + "zombiezen.com/go/capnproto2/internal/schema" + "zombiezen.com/go/capnproto2/internal/strquote" + "zombiezen.com/go/capnproto2/schemas" +) + +// Marker strings. +const ( + voidMarker = "void" + interfaceMarker = "" + anyPointerMarker = "" +) + +// Marshal returns the text representation of a struct. +func Marshal(typeID uint64, s capnp.Struct) (string, error) { + buf := new(bytes.Buffer) + if err := NewEncoder(buf).Encode(typeID, s); err != nil { + return "", err + } + return buf.String(), nil +} + +// MarshalList returns the text representation of a struct list. +func MarshalList(typeID uint64, l capnp.List) (string, error) { + buf := new(bytes.Buffer) + if err := NewEncoder(buf).EncodeList(typeID, l); err != nil { + return "", err + } + return buf.String(), nil +} + +// An Encoder writes the text format of Cap'n Proto messages to an output stream. +type Encoder struct { + w errWriter + tmp []byte + nodes nodemap.Map +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: errWriter{w: w}} +} + +// UseRegistry changes the registry that the encoder consults for +// schemas from the default registry. 
+func (enc *Encoder) UseRegistry(reg *schemas.Registry) { + enc.nodes.UseRegistry(reg) +} + +// Encode writes the text representation of s to the stream. +func (enc *Encoder) Encode(typeID uint64, s capnp.Struct) error { + if enc.w.err != nil { + return enc.w.err + } + err := enc.marshalStruct(typeID, s) + if err != nil { + return err + } + return enc.w.err +} + +// EncodeList writes the text representation of struct list l to the stream. +func (enc *Encoder) EncodeList(typeID uint64, l capnp.List) error { + _, seg, _ := capnp.NewMessage(capnp.SingleSegment(nil)) + typ, _ := schema.NewRootType(seg) + typ.SetStructType() + typ.StructType().SetTypeId(typeID) + return enc.marshalList(typ, l) +} + +func (enc *Encoder) marshalBool(v bool) { + if v { + enc.w.WriteString("true") + } else { + enc.w.WriteString("false") + } +} + +func (enc *Encoder) marshalInt(i int64) { + enc.tmp = strconv.AppendInt(enc.tmp[:0], i, 10) + enc.w.Write(enc.tmp) +} + +func (enc *Encoder) marshalUint(i uint64) { + enc.tmp = strconv.AppendUint(enc.tmp[:0], i, 10) + enc.w.Write(enc.tmp) +} + +func (enc *Encoder) marshalFloat32(f float32) { + enc.tmp = strconv.AppendFloat(enc.tmp[:0], float64(f), 'g', -1, 32) + enc.w.Write(enc.tmp) +} + +func (enc *Encoder) marshalFloat64(f float64) { + enc.tmp = strconv.AppendFloat(enc.tmp[:0], f, 'g', -1, 64) + enc.w.Write(enc.tmp) +} + +func (enc *Encoder) marshalText(t []byte) { + enc.tmp = strquote.Append(enc.tmp[:0], t) + enc.w.Write(enc.tmp) +} + +func needsEscape(b byte) bool { + return b < 0x20 || b >= 0x7f +} + +func hexDigit(b byte) byte { + const digits = "0123456789abcdef" + return digits[b] +} + +func (enc *Encoder) marshalStruct(typeID uint64, s capnp.Struct) error { + n, err := enc.nodes.Find(typeID) + if err != nil { + return err + } + if !n.IsValid() || n.Which() != schema.Node_Which_structNode { + return fmt.Errorf("cannot find struct type %#x", typeID) + } + var discriminant uint16 + if n.StructNode().DiscriminantCount() > 0 { + discriminant = s.Uint16(capnp.DataOffset(n.StructNode().DiscriminantOffset() * 2)) + } + enc.w.WriteByte('(') + fields := codeOrderFields(n.StructNode()) + first := true + for _, f := range fields { + if !(f.Which() == schema.Field_Which_slot || f.Which() == schema.Field_Which_group) { + continue + } + if dv := f.DiscriminantValue(); !(dv == schema.Field_noDiscriminant || dv == discriminant) { + continue + } + if !first { + enc.w.WriteString(", ") + } + first = false + name, err := f.NameBytes() + if err != nil { + return err + } + enc.w.Write(name) + enc.w.WriteString(" = ") + switch f.Which() { + case schema.Field_Which_slot: + if err := enc.marshalFieldValue(s, f); err != nil { + return err + } + case schema.Field_Which_group: + if err := enc.marshalStruct(f.Group().TypeId(), s); err != nil { + return err + } + } + } + enc.w.WriteByte(')') + return nil +} + +func (enc *Encoder) marshalFieldValue(s capnp.Struct, f schema.Field) error { + typ, err := f.Slot().Type() + if err != nil { + return err + } + dv, err := f.Slot().DefaultValue() + if err != nil { + return err + } + if dv.IsValid() && int(typ.Which()) != int(dv.Which()) { + name, _ := f.Name() + return fmt.Errorf("marshal field %s: default value is a %v, want %v", name, dv.Which(), typ.Which()) + } + switch typ.Which() { + case schema.Type_Which_void: + enc.w.WriteString(voidMarker) + case schema.Type_Which_bool: + v := s.Bit(capnp.BitOffset(f.Slot().Offset())) + d := dv.Bool() + enc.marshalBool(!d && v || d && !v) + case schema.Type_Which_int8: + v := 
s.Uint8(capnp.DataOffset(f.Slot().Offset())) + d := uint8(dv.Int8()) + enc.marshalInt(int64(int8(v ^ d))) + case schema.Type_Which_int16: + v := s.Uint16(capnp.DataOffset(f.Slot().Offset() * 2)) + d := uint16(dv.Int16()) + enc.marshalInt(int64(int16(v ^ d))) + case schema.Type_Which_int32: + v := s.Uint32(capnp.DataOffset(f.Slot().Offset() * 4)) + d := uint32(dv.Int32()) + enc.marshalInt(int64(int32(v ^ d))) + case schema.Type_Which_int64: + v := s.Uint64(capnp.DataOffset(f.Slot().Offset() * 8)) + d := uint64(dv.Int64()) + enc.marshalInt(int64(v ^ d)) + case schema.Type_Which_uint8: + v := s.Uint8(capnp.DataOffset(f.Slot().Offset())) + d := dv.Uint8() + enc.marshalUint(uint64(v ^ d)) + case schema.Type_Which_uint16: + v := s.Uint16(capnp.DataOffset(f.Slot().Offset() * 2)) + d := dv.Uint16() + enc.marshalUint(uint64(v ^ d)) + case schema.Type_Which_uint32: + v := s.Uint32(capnp.DataOffset(f.Slot().Offset() * 4)) + d := dv.Uint32() + enc.marshalUint(uint64(v ^ d)) + case schema.Type_Which_uint64: + v := s.Uint64(capnp.DataOffset(f.Slot().Offset() * 8)) + d := dv.Uint64() + enc.marshalUint(v ^ d) + case schema.Type_Which_float32: + v := s.Uint32(capnp.DataOffset(f.Slot().Offset() * 4)) + d := math.Float32bits(dv.Float32()) + enc.marshalFloat32(math.Float32frombits(v ^ d)) + case schema.Type_Which_float64: + v := s.Uint64(capnp.DataOffset(f.Slot().Offset() * 8)) + d := math.Float64bits(dv.Float64()) + enc.marshalFloat64(math.Float64frombits(v ^ d)) + case schema.Type_Which_structType: + p, err := s.Ptr(uint16(f.Slot().Offset())) + if err != nil { + return err + } + if !p.IsValid() { + p, _ = dv.StructValuePtr() + } + return enc.marshalStruct(typ.StructType().TypeId(), p.Struct()) + case schema.Type_Which_data: + p, err := s.Ptr(uint16(f.Slot().Offset())) + if err != nil { + return err + } + if !p.IsValid() { + b, _ := dv.Data() + enc.marshalText(b) + return nil + } + enc.marshalText(p.Data()) + case schema.Type_Which_text: + p, err := s.Ptr(uint16(f.Slot().Offset())) + if err != nil { + return err + } + if !p.IsValid() { + b, _ := dv.TextBytes() + enc.marshalText(b) + return nil + } + enc.marshalText(p.TextBytes()) + case schema.Type_Which_list: + elem, err := typ.List().ElementType() + if err != nil { + return err + } + p, err := s.Ptr(uint16(f.Slot().Offset())) + if err != nil { + return err + } + if !p.IsValid() { + p, _ = dv.ListPtr() + } + return enc.marshalList(elem, p.List()) + case schema.Type_Which_enum: + v := s.Uint16(capnp.DataOffset(f.Slot().Offset() * 2)) + d := dv.Uint16() + return enc.marshalEnum(typ.Enum().TypeId(), v^d) + case schema.Type_Which_interface: + enc.w.WriteString(interfaceMarker) + case schema.Type_Which_anyPointer: + enc.w.WriteString(anyPointerMarker) + default: + return fmt.Errorf("unknown field type %v", typ.Which()) + } + return nil +} + +func codeOrderFields(s schema.Node_structNode) []schema.Field { + list, _ := s.Fields() + n := list.Len() + fields := make([]schema.Field, n) + for i := 0; i < n; i++ { + f := list.At(i) + fields[f.CodeOrder()] = f + } + return fields +} + +func (enc *Encoder) marshalList(elem schema.Type, l capnp.List) error { + switch elem.Which() { + case schema.Type_Which_void: + enc.w.WriteString(capnp.VoidList{List: l}.String()) + case schema.Type_Which_bool: + enc.w.WriteString(capnp.BitList{List: l}.String()) + case schema.Type_Which_int8: + enc.w.WriteString(capnp.Int8List{List: l}.String()) + case schema.Type_Which_int16: + enc.w.WriteString(capnp.Int16List{List: l}.String()) + case schema.Type_Which_int32: + 
enc.w.WriteString(capnp.Int32List{List: l}.String()) + case schema.Type_Which_int64: + enc.w.WriteString(capnp.Int64List{List: l}.String()) + case schema.Type_Which_uint8: + enc.w.WriteString(capnp.UInt8List{List: l}.String()) + case schema.Type_Which_uint16: + enc.w.WriteString(capnp.UInt16List{List: l}.String()) + case schema.Type_Which_uint32: + enc.w.WriteString(capnp.UInt32List{List: l}.String()) + case schema.Type_Which_uint64: + enc.w.WriteString(capnp.UInt64List{List: l}.String()) + case schema.Type_Which_float32: + enc.w.WriteString(capnp.Float32List{List: l}.String()) + case schema.Type_Which_float64: + enc.w.WriteString(capnp.Float64List{List: l}.String()) + case schema.Type_Which_data: + enc.w.WriteString(capnp.DataList{List: l}.String()) + case schema.Type_Which_text: + enc.w.WriteString(capnp.TextList{List: l}.String()) + case schema.Type_Which_structType: + enc.w.WriteByte('[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + enc.w.WriteString(", ") + } + err := enc.marshalStruct(elem.StructType().TypeId(), l.Struct(i)) + if err != nil { + return err + } + } + enc.w.WriteByte(']') + case schema.Type_Which_list: + enc.w.WriteByte('[') + ee, err := elem.List().ElementType() + if err != nil { + return err + } + for i := 0; i < l.Len(); i++ { + if i > 0 { + enc.w.WriteString(", ") + } + p, err := capnp.PointerList{List: l}.PtrAt(i) + if err != nil { + return err + } + err = enc.marshalList(ee, p.List()) + if err != nil { + return err + } + } + enc.w.WriteByte(']') + case schema.Type_Which_enum: + enc.w.WriteByte('[') + il := capnp.UInt16List{List: l} + typ := elem.Enum().TypeId() + // TODO(light): only search for node once + for i := 0; i < il.Len(); i++ { + if i > 0 { + enc.w.WriteString(", ") + } + enc.marshalEnum(typ, il.At(i)) + } + enc.w.WriteByte(']') + case schema.Type_Which_interface: + enc.w.WriteByte('[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + enc.w.WriteString(", ") + } + enc.w.WriteString(interfaceMarker) + } + enc.w.WriteByte(']') + case schema.Type_Which_anyPointer: + enc.w.WriteByte('[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + enc.w.WriteString(", ") + } + enc.w.WriteString(anyPointerMarker) + } + enc.w.WriteByte(']') + default: + return fmt.Errorf("unknown list type %v", elem.Which()) + } + return nil +} + +func (enc *Encoder) marshalEnum(typ uint64, val uint16) error { + n, err := enc.nodes.Find(typ) + if err != nil { + return err + } + if n.Which() != schema.Node_Which_enum { + return fmt.Errorf("marshaling enum of type @%#x: type is not an enum", typ) + } + enums, err := n.Enum().Enumerants() + if err != nil { + return err + } + if int(val) >= enums.Len() { + enc.marshalUint(uint64(val)) + return nil + } + name, err := enums.At(int(val)).NameBytes() + if err != nil { + return err + } + enc.w.Write(name) + return nil +} + +type errWriter struct { + w io.Writer + err error +} + +func (ew *errWriter) Write(p []byte) (int, error) { + if ew.err != nil { + return 0, ew.err + } + var n int + n, ew.err = ew.w.Write(p) + return n, ew.err +} + +func (ew *errWriter) WriteString(s string) (int, error) { + if ew.err != nil { + return 0, ew.err + } + var n int + n, ew.err = io.WriteString(ew.w, s) + return n, ew.err +} + +func (ew *errWriter) WriteByte(b byte) error { + if ew.err != nil { + return ew.err + } + if bw, ok := ew.w.(io.ByteWriter); ok { + ew.err = bw.WriteByte(b) + } else { + _, ew.err = ew.w.Write([]byte{b}) + } + return ew.err +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/go.capnp.go 
b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/go.capnp.go new file mode 100644 index 00000000..28f18bab --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/go.capnp.go @@ -0,0 +1,45 @@ +// Code generated by capnpc-go. DO NOT EDIT. + +package capnp + +import ( + schemas "zombiezen.com/go/capnproto2/schemas" +) + +const Package = uint64(0xbea97f1023792be0) +const Import = uint64(0xe130b601260e44b5) +const Doc = uint64(0xc58ad6bd519f935e) +const Tag = uint64(0xa574b41924caefc7) +const Notag = uint64(0xc8768679ec52e012) +const Customtype = uint64(0xfa10659ae02f2093) +const Name = uint64(0xc2b96012172f8df1) +const schema_d12a1c51fedd6c88 = "x\xda\x12\x98\xe2\xc0d\xc8z\x9c\x89\x81!P\x81\x95" + + "\xed\xff\xf1\xf7\xa7T$\xb7\x94,e\x08\xe4e\xe5\xf8" + + "\xdf\x91s\xf7_\xa0\x8c\xd6E\x06\x06FaO\xc6." + + "\xe1@Fv\x06\x86`\x1fFfF\x06\xc6\xff\x0f\xb4" + + "+\x95\x05\xeaW\xee\x03)eDQj\xcb\xb8J\xd8" + + "\x15\xac\xd4\x01\xa2\xf4c\xaf\xbe\xb8P\xc2\xceC\x0c\x17" + + "yY\xff\xf1\xa3\xa85d\x9c$l\x09Vk\x02Q" + + "\x1b7y~\xe0\xdek]GA\xc6\x9a\xa0(Ue" + + "\xec\x12\xd6\x05+\xd5\x80(\x15z\x10\xf4\xa6\xb2\xad\xec" + + "\x04\xa6c%\x19g\x09+\x82\x95\xca@\x94nu\xe1" + + "Sc\xdcf\xf0\x10\xd3\xb1\xbc\x8c\x8b\x84E\xc1J\x05" + + " J'+\xe8?\x98\x95*\xf0\x0b\xa4T\x01E)" + + "#\xe3!aN\xb0R\x16\x90R\x9e\xff\xc5%)\xfa" + + "\xe9\xf9z\xc9\x8c\x89\x05y\x05V%\x89\xe9\x0c\x0c\x01" + + "\x8c\x8c\x8c<\x0cLhR\x05\x89\xc9\xfc\xd9\x89\xe9\xa9" + + "\xd8e\xf3\x12s\x19qH\xa5\xe4'\xe323/\xbf" + + "\x8491=\x80\x91\x91\x81\x19M&3\xb7\x80=\xbf" + + "\xa8\x04]\x1b\x13X2\xb9\xb4\xb8$?\xb7\xa4\xb2 " + + "\x15f. \x00\x00\xff\xff\x89\xff\x94\xdf" + +func init() { + schemas.Register(schema_d12a1c51fedd6c88, + 0xa574b41924caefc7, + 0xbea97f1023792be0, + 0xc2b96012172f8df1, + 0xc58ad6bd519f935e, + 0xc8768679ec52e012, + 0xe130b601260e44b5, + 0xfa10659ae02f2093) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/nodemap/BUILD.bazel b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/nodemap/BUILD.bazel new file mode 100644 index 00000000..b1c7bc61 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/nodemap/BUILD.bazel @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["nodemap.go"], + visibility = ["//:__subpackages__"], + deps = [ + "//:go_default_library", + "//internal/schema:go_default_library", + "//schemas:go_default_library", + ], +) diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/nodemap/nodemap.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/nodemap/nodemap.go new file mode 100644 index 00000000..bcacffc7 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/nodemap/nodemap.go @@ -0,0 +1,58 @@ +// Package nodemap provides a schema registry index type. +package nodemap + +import ( + "zombiezen.com/go/capnproto2" + "zombiezen.com/go/capnproto2/internal/schema" + "zombiezen.com/go/capnproto2/schemas" +) + +// Map is a lazy index of a registry. +// The zero value is an index of the default registry. 
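+//
+// A lookup sketch (id is an illustrative schema node ID that has been
+// registered with the default registry):
+//
+//	var m Map
+//	n, err := m.Find(id)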
+type Map struct { + reg *schemas.Registry + nodes map[uint64]schema.Node +} + +func (m *Map) registry() *schemas.Registry { + if m.reg != nil { + return m.reg + } + return &schemas.DefaultRegistry +} + +func (m *Map) UseRegistry(reg *schemas.Registry) { + m.reg = reg + m.nodes = make(map[uint64]schema.Node) +} + +// Find returns the node for the given ID. +func (m *Map) Find(id uint64) (schema.Node, error) { + if n := m.nodes[id]; n.IsValid() { + return n, nil + } + data, err := m.registry().Find(id) + if err != nil { + return schema.Node{}, err + } + msg, err := capnp.Unmarshal(data) + if err != nil { + return schema.Node{}, err + } + req, err := schema.ReadRootCodeGeneratorRequest(msg) + if err != nil { + return schema.Node{}, err + } + nodes, err := req.Nodes() + if err != nil { + return schema.Node{}, err + } + if m.nodes == nil { + m.nodes = make(map[uint64]schema.Node) + } + for i := 0; i < nodes.Len(); i++ { + n := nodes.At(i) + m.nodes[n.Id()] = n + } + return m.nodes[id], nil +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/BUILD.bazel b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/BUILD.bazel new file mode 100644 index 00000000..00374851 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/BUILD.bazel @@ -0,0 +1,18 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "discard.go", + "discard_go14.go", + "packed.go", + ], + visibility = ["//:__subpackages__"], +) + +go_test( + name = "go_default_test", + srcs = ["packed_test.go"], + data = glob(["testdata/**"]), + library = ":go_default_library", +) diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/discard.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/discard.go new file mode 100644 index 00000000..6bf54628 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/discard.go @@ -0,0 +1,11 @@ +// +build go1.5 + +package packed + +import ( + "bufio" +) + +func discard(r *bufio.Reader, n int) { + r.Discard(n) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/discard_go14.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/discard_go14.go new file mode 100644 index 00000000..a42c391f --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/discard_go14.go @@ -0,0 +1,13 @@ +// +build !go1.5 + +package packed + +import ( + "bufio" + "io" + "io/ioutil" +) + +func discard(r *bufio.Reader, n int) { + io.CopyN(ioutil.Discard, r, int64(n)) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/fuzz.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/fuzz.go new file mode 100644 index 00000000..8e10bf5b --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/fuzz.go @@ -0,0 +1,65 @@ +// +build gofuzz + +// Fuzz test harness. 
To run: +// go-fuzz-build zombiezen.com/go/capnproto2/internal/packed +// go-fuzz -bin=packed-fuzz.zip -workdir=internal/packed/testdata + +package packed + +import ( + "bufio" + "bytes" + "io" + "io/ioutil" +) + +func Fuzz(data []byte) int { + result := 0 + + // Unpacked + if unpacked, err := Unpack(nil, data); err == nil { + checkRepack(unpacked) + result = 1 + } + + // Read + { + r := NewReader(bufio.NewReader(bytes.NewReader(data))) + if unpacked, err := ioutil.ReadAll(r); err == nil { + checkRepack(unpacked) + result = 1 + } + } + + // ReadWord + { + r := NewReader(bufio.NewReader(bytes.NewReader(data))) + var unpacked []byte + var err error + for { + n := len(unpacked) + unpacked = append(unpacked, 0, 0, 0, 0, 0, 0, 0, 0) + if err = r.ReadWord(unpacked[n:]); err != nil { + unpacked = unpacked[:n] + break + } + } + if err == io.EOF { + checkRepack(unpacked) + result = 1 + } + } + + return result +} + +func checkRepack(unpacked []byte) { + packed := Pack(nil, unpacked) + unpacked2, err := Unpack(nil, packed) + if err != nil { + panic("correctness: unpack, pack, unpack gives error: " + err.Error()) + } + if !bytes.Equal(unpacked, unpacked2) { + panic("correctness: unpack, pack, unpack gives different results") + } +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/packed.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/packed.go new file mode 100644 index 00000000..38573501 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/packed/packed.go @@ -0,0 +1,334 @@ +// Package packed provides functions to read and write the "packed" +// compression scheme described at https://capnproto.org/encoding.html#packing. +package packed + +import ( + "bufio" + "errors" + "io" +) + +const wordSize = 8 + +// Special case tags. +const ( + zeroTag byte = 0x00 + unpackedTag byte = 0xff +) + +// Pack appends the packed version of src to dst and returns the +// resulting slice. len(src) must be a multiple of 8 or Pack panics. +func Pack(dst, src []byte) []byte { + if len(src)%wordSize != 0 { + panic("packed.Pack len(src) must be a multiple of 8") + } + var buf [wordSize]byte + for len(src) > 0 { + var hdr byte + n := 0 + for i := uint(0); i < wordSize; i++ { + if src[i] != 0 { + hdr |= 1 << i + buf[n] = src[i] + n++ + } + } + dst = append(dst, hdr) + dst = append(dst, buf[:n]...) + src = src[wordSize:] + + switch hdr { + case zeroTag: + z := min(numZeroWords(src), 0xff) + dst = append(dst, byte(z)) + src = src[z*wordSize:] + case unpackedTag: + i := 0 + end := min(len(src), 0xff*wordSize) + for i < end { + zeros := 0 + for _, b := range src[i : i+wordSize] { + if b == 0 { + zeros++ + } + } + + if zeros > 1 { + break + } + i += wordSize + } + + rawWords := byte(i / wordSize) + dst = append(dst, rawWords) + dst = append(dst, src[:i]...) + src = src[i:] + } + } + return dst +} + +// numZeroWords returns the number of leading zero words in b. +func numZeroWords(b []byte) int { + for i, bb := range b { + if bb != 0 { + return i / wordSize + } + } + return len(b) / wordSize +} + +// Unpack appends the unpacked version of src to dst and returns the +// resulting slice. 
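+//
+// A round-trip sketch using Pack from this package (words is an illustrative
+// byte slice whose length is a multiple of 8):
+//
+//	packed := Pack(nil, words)
+//	unpacked, err := Unpack(nil, packed)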
+func Unpack(dst, src []byte) ([]byte, error) { + for len(src) > 0 { + tag := src[0] + src = src[1:] + + pstart := len(dst) + dst = allocWords(dst, 1) + p := dst[pstart : pstart+wordSize] + if len(src) >= wordSize { + i := 0 + nz := tag & 1 + p[0] = src[i] & -nz + i += int(nz) + nz = tag >> 1 & 1 + p[1] = src[i] & -nz + i += int(nz) + nz = tag >> 2 & 1 + p[2] = src[i] & -nz + i += int(nz) + nz = tag >> 3 & 1 + p[3] = src[i] & -nz + i += int(nz) + nz = tag >> 4 & 1 + p[4] = src[i] & -nz + i += int(nz) + nz = tag >> 5 & 1 + p[5] = src[i] & -nz + i += int(nz) + nz = tag >> 6 & 1 + p[6] = src[i] & -nz + i += int(nz) + nz = tag >> 7 & 1 + p[7] = src[i] & -nz + i += int(nz) + src = src[i:] + } else { + for i := uint(0); i < wordSize; i++ { + if tag&(1<= target { + pp := p[len(p):target] + for i := range pp { + pp[i] = 0 + } + return p[:target] + } + newcap := cap(p) + doublecap := newcap + newcap + if target > doublecap { + newcap = target + } else { + if len(p) < 1024 { + newcap = doublecap + } else { + for newcap < target { + newcap += newcap / 4 + } + } + } + pp := make([]byte, target, newcap) + copy(pp, p) + return pp +} + +// A Reader decompresses a packed byte stream. +type Reader struct { + // ReadWord state + rd *bufio.Reader + err error + zeroes int + literal int + + // Read state + word [wordSize]byte + wordIdx int +} + +// NewReader returns a reader that decompresses a packed stream from r. +func NewReader(r *bufio.Reader) *Reader { + return &Reader{rd: r, wordIdx: wordSize} +} + +func min(a, b int) int { + if b < a { + return b + } + return a +} + +// ReadWord decompresses the next word from the underlying stream. +func (r *Reader) ReadWord(p []byte) error { + if len(p) < wordSize { + return errors.New("packed: read word buffer too small") + } + r.wordIdx = wordSize // if the caller tries to call ReadWord and Read, don't give them partial words. + if r.err != nil { + err := r.err + r.err = nil + return err + } + p = p[:wordSize] + switch { + case r.zeroes > 0: + r.zeroes-- + for i := range p { + p[i] = 0 + } + return nil + case r.literal > 0: + r.literal-- + _, err := io.ReadFull(r.rd, p) + return err + } + + var tag byte + if r.rd.Buffered() < wordSize+1 { + var err error + tag, err = r.rd.ReadByte() + if err != nil { + return err + } + for i := range p { + p[i] = 0 + } + for i := uint(0); i < wordSize; i++ { + if tag&(1<> 1 & 1 + p[1] = b[i] & -nz + i += int(nz) + nz = tag >> 2 & 1 + p[2] = b[i] & -nz + i += int(nz) + nz = tag >> 3 & 1 + p[3] = b[i] & -nz + i += int(nz) + nz = tag >> 4 & 1 + p[4] = b[i] & -nz + i += int(nz) + nz = tag >> 5 & 1 + p[5] = b[i] & -nz + i += int(nz) + nz = tag >> 6 & 1 + p[6] = b[i] & -nz + i += int(nz) + nz = tag >> 7 & 1 + p[7] = b[i] & -nz + i += int(nz) + discard(r.rd, i) + } + switch tag { + case zeroTag: + z, err := r.rd.ReadByte() + if err == io.EOF { + r.err = io.ErrUnexpectedEOF + return nil + } else if err != nil { + r.err = err + return nil + } + r.zeroes = int(z) + case unpackedTag: + l, err := r.rd.ReadByte() + if err == io.EOF { + r.err = io.ErrUnexpectedEOF + return nil + } else if err != nil { + r.err = err + return nil + } + r.literal = int(l) + } + return nil +} + +// Read reads up to len(p) bytes into p. This will decompress whole +// words at a time, so mixing calls to Read and ReadWord may lead to +// bytes missing. 
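+//
+// Typical use (a sketch; in is an illustrative io.Reader carrying a packed
+// stream):
+//
+//	r := NewReader(bufio.NewReader(in))
+//	buf := make([]byte, 4096)
+//	n, err := r.Read(buf)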
+func (r *Reader) Read(p []byte) (n int, err error) { + if r.wordIdx < wordSize { + n = copy(p, r.word[r.wordIdx:]) + r.wordIdx += n + } + for n < len(p) { + if r.rd.Buffered() < wordSize+1 && n > 0 { + return n, nil + } + if len(p)-n >= wordSize { + err := r.ReadWord(p[n:]) + if err != nil { + return n, err + } + n += wordSize + } else { + err := r.ReadWord(r.word[:]) + if err != nil { + return n, err + } + r.wordIdx = copy(p[n:], r.word[:]) + n += r.wordIdx + } + } + return n, nil +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/schema/BUILD.bazel b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/schema/BUILD.bazel new file mode 100644 index 00000000..d1c1d0df --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/schema/BUILD.bazel @@ -0,0 +1,8 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["schema.capnp.go"], + visibility = ["//:__subpackages__"], + deps = ["//:go_default_library"], +) diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/schema/schema.capnp.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/schema/schema.capnp.go new file mode 100644 index 00000000..d2937278 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/schema/schema.capnp.go @@ -0,0 +1,3071 @@ +// Code generated by capnpc-go. DO NOT EDIT. + +package schema + +import ( + math "math" + strconv "strconv" + capnp "zombiezen.com/go/capnproto2" +) + +// Constants defined in schema.capnp. +const ( + Field_noDiscriminant = uint16(65535) +) + +type Node struct{ capnp.Struct } +type Node_structNode Node +type Node_enum Node +type Node_interface Node +type Node_const Node +type Node_annotation Node +type Node_Which uint16 + +const ( + Node_Which_file Node_Which = 0 + Node_Which_structNode Node_Which = 1 + Node_Which_enum Node_Which = 2 + Node_Which_interface Node_Which = 3 + Node_Which_const Node_Which = 4 + Node_Which_annotation Node_Which = 5 +) + +func (w Node_Which) String() string { + const s = "filestructNodeenuminterfaceconstannotation" + switch w { + case Node_Which_file: + return s[0:4] + case Node_Which_structNode: + return s[4:14] + case Node_Which_enum: + return s[14:18] + case Node_Which_interface: + return s[18:27] + case Node_Which_const: + return s[27:32] + case Node_Which_annotation: + return s[32:42] + + } + return "Node_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +// Node_TypeID is the unique identifier for the type Node. 
+const Node_TypeID = 0xe682ab4cf923a417 + +func NewNode(s *capnp.Segment) (Node, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 40, PointerCount: 6}) + return Node{st}, err +} + +func NewRootNode(s *capnp.Segment) (Node, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 40, PointerCount: 6}) + return Node{st}, err +} + +func ReadRootNode(msg *capnp.Message) (Node, error) { + root, err := msg.RootPtr() + return Node{root.Struct()}, err +} + +func (s Node) Which() Node_Which { + return Node_Which(s.Struct.Uint16(12)) +} +func (s Node) Id() uint64 { + return s.Struct.Uint64(0) +} + +func (s Node) SetId(v uint64) { + s.Struct.SetUint64(0, v) +} + +func (s Node) DisplayName() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s Node) HasDisplayName() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Node) DisplayNameBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s Node) SetDisplayName(v string) error { + return s.Struct.SetText(0, v) +} + +func (s Node) DisplayNamePrefixLength() uint32 { + return s.Struct.Uint32(8) +} + +func (s Node) SetDisplayNamePrefixLength(v uint32) { + s.Struct.SetUint32(8, v) +} + +func (s Node) ScopeId() uint64 { + return s.Struct.Uint64(16) +} + +func (s Node) SetScopeId(v uint64) { + s.Struct.SetUint64(16, v) +} + +func (s Node) Parameters() (Node_Parameter_List, error) { + p, err := s.Struct.Ptr(5) + return Node_Parameter_List{List: p.List()}, err +} + +func (s Node) HasParameters() bool { + p, err := s.Struct.Ptr(5) + return p.IsValid() || err != nil +} + +func (s Node) SetParameters(v Node_Parameter_List) error { + return s.Struct.SetPtr(5, v.List.ToPtr()) +} + +// NewParameters sets the parameters field to a newly +// allocated Node_Parameter_List, preferring placement in s's segment. +func (s Node) NewParameters(n int32) (Node_Parameter_List, error) { + l, err := NewNode_Parameter_List(s.Struct.Segment(), n) + if err != nil { + return Node_Parameter_List{}, err + } + err = s.Struct.SetPtr(5, l.List.ToPtr()) + return l, err +} + +func (s Node) IsGeneric() bool { + return s.Struct.Bit(288) +} + +func (s Node) SetIsGeneric(v bool) { + s.Struct.SetBit(288, v) +} + +func (s Node) NestedNodes() (Node_NestedNode_List, error) { + p, err := s.Struct.Ptr(1) + return Node_NestedNode_List{List: p.List()}, err +} + +func (s Node) HasNestedNodes() bool { + p, err := s.Struct.Ptr(1) + return p.IsValid() || err != nil +} + +func (s Node) SetNestedNodes(v Node_NestedNode_List) error { + return s.Struct.SetPtr(1, v.List.ToPtr()) +} + +// NewNestedNodes sets the nestedNodes field to a newly +// allocated Node_NestedNode_List, preferring placement in s's segment. +func (s Node) NewNestedNodes(n int32) (Node_NestedNode_List, error) { + l, err := NewNode_NestedNode_List(s.Struct.Segment(), n) + if err != nil { + return Node_NestedNode_List{}, err + } + err = s.Struct.SetPtr(1, l.List.ToPtr()) + return l, err +} + +func (s Node) Annotations() (Annotation_List, error) { + p, err := s.Struct.Ptr(2) + return Annotation_List{List: p.List()}, err +} + +func (s Node) HasAnnotations() bool { + p, err := s.Struct.Ptr(2) + return p.IsValid() || err != nil +} + +func (s Node) SetAnnotations(v Annotation_List) error { + return s.Struct.SetPtr(2, v.List.ToPtr()) +} + +// NewAnnotations sets the annotations field to a newly +// allocated Annotation_List, preferring placement in s's segment. 
+func (s Node) NewAnnotations(n int32) (Annotation_List, error) { + l, err := NewAnnotation_List(s.Struct.Segment(), n) + if err != nil { + return Annotation_List{}, err + } + err = s.Struct.SetPtr(2, l.List.ToPtr()) + return l, err +} + +func (s Node) SetFile() { + s.Struct.SetUint16(12, 0) + +} + +func (s Node) StructNode() Node_structNode { return Node_structNode(s) } + +func (s Node) SetStructNode() { + s.Struct.SetUint16(12, 1) +} + +func (s Node_structNode) DataWordCount() uint16 { + return s.Struct.Uint16(14) +} + +func (s Node_structNode) SetDataWordCount(v uint16) { + s.Struct.SetUint16(14, v) +} + +func (s Node_structNode) PointerCount() uint16 { + return s.Struct.Uint16(24) +} + +func (s Node_structNode) SetPointerCount(v uint16) { + s.Struct.SetUint16(24, v) +} + +func (s Node_structNode) PreferredListEncoding() ElementSize { + return ElementSize(s.Struct.Uint16(26)) +} + +func (s Node_structNode) SetPreferredListEncoding(v ElementSize) { + s.Struct.SetUint16(26, uint16(v)) +} + +func (s Node_structNode) IsGroup() bool { + return s.Struct.Bit(224) +} + +func (s Node_structNode) SetIsGroup(v bool) { + s.Struct.SetBit(224, v) +} + +func (s Node_structNode) DiscriminantCount() uint16 { + return s.Struct.Uint16(30) +} + +func (s Node_structNode) SetDiscriminantCount(v uint16) { + s.Struct.SetUint16(30, v) +} + +func (s Node_structNode) DiscriminantOffset() uint32 { + return s.Struct.Uint32(32) +} + +func (s Node_structNode) SetDiscriminantOffset(v uint32) { + s.Struct.SetUint32(32, v) +} + +func (s Node_structNode) Fields() (Field_List, error) { + p, err := s.Struct.Ptr(3) + return Field_List{List: p.List()}, err +} + +func (s Node_structNode) HasFields() bool { + p, err := s.Struct.Ptr(3) + return p.IsValid() || err != nil +} + +func (s Node_structNode) SetFields(v Field_List) error { + return s.Struct.SetPtr(3, v.List.ToPtr()) +} + +// NewFields sets the fields field to a newly +// allocated Field_List, preferring placement in s's segment. +func (s Node_structNode) NewFields(n int32) (Field_List, error) { + l, err := NewField_List(s.Struct.Segment(), n) + if err != nil { + return Field_List{}, err + } + err = s.Struct.SetPtr(3, l.List.ToPtr()) + return l, err +} + +func (s Node) Enum() Node_enum { return Node_enum(s) } + +func (s Node) SetEnum() { + s.Struct.SetUint16(12, 2) +} + +func (s Node_enum) Enumerants() (Enumerant_List, error) { + p, err := s.Struct.Ptr(3) + return Enumerant_List{List: p.List()}, err +} + +func (s Node_enum) HasEnumerants() bool { + p, err := s.Struct.Ptr(3) + return p.IsValid() || err != nil +} + +func (s Node_enum) SetEnumerants(v Enumerant_List) error { + return s.Struct.SetPtr(3, v.List.ToPtr()) +} + +// NewEnumerants sets the enumerants field to a newly +// allocated Enumerant_List, preferring placement in s's segment. 
+func (s Node_enum) NewEnumerants(n int32) (Enumerant_List, error) { + l, err := NewEnumerant_List(s.Struct.Segment(), n) + if err != nil { + return Enumerant_List{}, err + } + err = s.Struct.SetPtr(3, l.List.ToPtr()) + return l, err +} + +func (s Node) Interface() Node_interface { return Node_interface(s) } + +func (s Node) SetInterface() { + s.Struct.SetUint16(12, 3) +} + +func (s Node_interface) Methods() (Method_List, error) { + p, err := s.Struct.Ptr(3) + return Method_List{List: p.List()}, err +} + +func (s Node_interface) HasMethods() bool { + p, err := s.Struct.Ptr(3) + return p.IsValid() || err != nil +} + +func (s Node_interface) SetMethods(v Method_List) error { + return s.Struct.SetPtr(3, v.List.ToPtr()) +} + +// NewMethods sets the methods field to a newly +// allocated Method_List, preferring placement in s's segment. +func (s Node_interface) NewMethods(n int32) (Method_List, error) { + l, err := NewMethod_List(s.Struct.Segment(), n) + if err != nil { + return Method_List{}, err + } + err = s.Struct.SetPtr(3, l.List.ToPtr()) + return l, err +} + +func (s Node_interface) Superclasses() (Superclass_List, error) { + p, err := s.Struct.Ptr(4) + return Superclass_List{List: p.List()}, err +} + +func (s Node_interface) HasSuperclasses() bool { + p, err := s.Struct.Ptr(4) + return p.IsValid() || err != nil +} + +func (s Node_interface) SetSuperclasses(v Superclass_List) error { + return s.Struct.SetPtr(4, v.List.ToPtr()) +} + +// NewSuperclasses sets the superclasses field to a newly +// allocated Superclass_List, preferring placement in s's segment. +func (s Node_interface) NewSuperclasses(n int32) (Superclass_List, error) { + l, err := NewSuperclass_List(s.Struct.Segment(), n) + if err != nil { + return Superclass_List{}, err + } + err = s.Struct.SetPtr(4, l.List.ToPtr()) + return l, err +} + +func (s Node) Const() Node_const { return Node_const(s) } + +func (s Node) SetConst() { + s.Struct.SetUint16(12, 4) +} + +func (s Node_const) Type() (Type, error) { + p, err := s.Struct.Ptr(3) + return Type{Struct: p.Struct()}, err +} + +func (s Node_const) HasType() bool { + p, err := s.Struct.Ptr(3) + return p.IsValid() || err != nil +} + +func (s Node_const) SetType(v Type) error { + return s.Struct.SetPtr(3, v.Struct.ToPtr()) +} + +// NewType sets the type field to a newly +// allocated Type struct, preferring placement in s's segment. +func (s Node_const) NewType() (Type, error) { + ss, err := NewType(s.Struct.Segment()) + if err != nil { + return Type{}, err + } + err = s.Struct.SetPtr(3, ss.Struct.ToPtr()) + return ss, err +} + +func (s Node_const) Value() (Value, error) { + p, err := s.Struct.Ptr(4) + return Value{Struct: p.Struct()}, err +} + +func (s Node_const) HasValue() bool { + p, err := s.Struct.Ptr(4) + return p.IsValid() || err != nil +} + +func (s Node_const) SetValue(v Value) error { + return s.Struct.SetPtr(4, v.Struct.ToPtr()) +} + +// NewValue sets the value field to a newly +// allocated Value struct, preferring placement in s's segment. 
+func (s Node_const) NewValue() (Value, error) { + ss, err := NewValue(s.Struct.Segment()) + if err != nil { + return Value{}, err + } + err = s.Struct.SetPtr(4, ss.Struct.ToPtr()) + return ss, err +} + +func (s Node) Annotation() Node_annotation { return Node_annotation(s) } + +func (s Node) SetAnnotation() { + s.Struct.SetUint16(12, 5) +} + +func (s Node_annotation) Type() (Type, error) { + p, err := s.Struct.Ptr(3) + return Type{Struct: p.Struct()}, err +} + +func (s Node_annotation) HasType() bool { + p, err := s.Struct.Ptr(3) + return p.IsValid() || err != nil +} + +func (s Node_annotation) SetType(v Type) error { + return s.Struct.SetPtr(3, v.Struct.ToPtr()) +} + +// NewType sets the type field to a newly +// allocated Type struct, preferring placement in s's segment. +func (s Node_annotation) NewType() (Type, error) { + ss, err := NewType(s.Struct.Segment()) + if err != nil { + return Type{}, err + } + err = s.Struct.SetPtr(3, ss.Struct.ToPtr()) + return ss, err +} + +func (s Node_annotation) TargetsFile() bool { + return s.Struct.Bit(112) +} + +func (s Node_annotation) SetTargetsFile(v bool) { + s.Struct.SetBit(112, v) +} + +func (s Node_annotation) TargetsConst() bool { + return s.Struct.Bit(113) +} + +func (s Node_annotation) SetTargetsConst(v bool) { + s.Struct.SetBit(113, v) +} + +func (s Node_annotation) TargetsEnum() bool { + return s.Struct.Bit(114) +} + +func (s Node_annotation) SetTargetsEnum(v bool) { + s.Struct.SetBit(114, v) +} + +func (s Node_annotation) TargetsEnumerant() bool { + return s.Struct.Bit(115) +} + +func (s Node_annotation) SetTargetsEnumerant(v bool) { + s.Struct.SetBit(115, v) +} + +func (s Node_annotation) TargetsStruct() bool { + return s.Struct.Bit(116) +} + +func (s Node_annotation) SetTargetsStruct(v bool) { + s.Struct.SetBit(116, v) +} + +func (s Node_annotation) TargetsField() bool { + return s.Struct.Bit(117) +} + +func (s Node_annotation) SetTargetsField(v bool) { + s.Struct.SetBit(117, v) +} + +func (s Node_annotation) TargetsUnion() bool { + return s.Struct.Bit(118) +} + +func (s Node_annotation) SetTargetsUnion(v bool) { + s.Struct.SetBit(118, v) +} + +func (s Node_annotation) TargetsGroup() bool { + return s.Struct.Bit(119) +} + +func (s Node_annotation) SetTargetsGroup(v bool) { + s.Struct.SetBit(119, v) +} + +func (s Node_annotation) TargetsInterface() bool { + return s.Struct.Bit(120) +} + +func (s Node_annotation) SetTargetsInterface(v bool) { + s.Struct.SetBit(120, v) +} + +func (s Node_annotation) TargetsMethod() bool { + return s.Struct.Bit(121) +} + +func (s Node_annotation) SetTargetsMethod(v bool) { + s.Struct.SetBit(121, v) +} + +func (s Node_annotation) TargetsParam() bool { + return s.Struct.Bit(122) +} + +func (s Node_annotation) SetTargetsParam(v bool) { + s.Struct.SetBit(122, v) +} + +func (s Node_annotation) TargetsAnnotation() bool { + return s.Struct.Bit(123) +} + +func (s Node_annotation) SetTargetsAnnotation(v bool) { + s.Struct.SetBit(123, v) +} + +// Node_List is a list of Node. +type Node_List struct{ capnp.List } + +// NewNode creates a new list of Node. +func NewNode_List(s *capnp.Segment, sz int32) (Node_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 40, PointerCount: 6}, sz) + return Node_List{l}, err +} + +func (s Node_List) At(i int) Node { return Node{s.List.Struct(i)} } + +func (s Node_List) Set(i int, v Node) error { return s.List.SetStruct(i, v.Struct) } + +// Node_Promise is a wrapper for a Node promised by a client call. 
+type Node_Promise struct{ *capnp.Pipeline } + +func (p Node_Promise) Struct() (Node, error) { + s, err := p.Pipeline.Struct() + return Node{s}, err +} + +func (p Node_Promise) StructNode() Node_structNode_Promise { return Node_structNode_Promise{p.Pipeline} } + +// Node_structNode_Promise is a wrapper for a Node_structNode promised by a client call. +type Node_structNode_Promise struct{ *capnp.Pipeline } + +func (p Node_structNode_Promise) Struct() (Node_structNode, error) { + s, err := p.Pipeline.Struct() + return Node_structNode{s}, err +} + +func (p Node_Promise) Enum() Node_enum_Promise { return Node_enum_Promise{p.Pipeline} } + +// Node_enum_Promise is a wrapper for a Node_enum promised by a client call. +type Node_enum_Promise struct{ *capnp.Pipeline } + +func (p Node_enum_Promise) Struct() (Node_enum, error) { + s, err := p.Pipeline.Struct() + return Node_enum{s}, err +} + +func (p Node_Promise) Interface() Node_interface_Promise { return Node_interface_Promise{p.Pipeline} } + +// Node_interface_Promise is a wrapper for a Node_interface promised by a client call. +type Node_interface_Promise struct{ *capnp.Pipeline } + +func (p Node_interface_Promise) Struct() (Node_interface, error) { + s, err := p.Pipeline.Struct() + return Node_interface{s}, err +} + +func (p Node_Promise) Const() Node_const_Promise { return Node_const_Promise{p.Pipeline} } + +// Node_const_Promise is a wrapper for a Node_const promised by a client call. +type Node_const_Promise struct{ *capnp.Pipeline } + +func (p Node_const_Promise) Struct() (Node_const, error) { + s, err := p.Pipeline.Struct() + return Node_const{s}, err +} + +func (p Node_const_Promise) Type() Type_Promise { + return Type_Promise{Pipeline: p.Pipeline.GetPipeline(3)} +} + +func (p Node_const_Promise) Value() Value_Promise { + return Value_Promise{Pipeline: p.Pipeline.GetPipeline(4)} +} + +func (p Node_Promise) Annotation() Node_annotation_Promise { return Node_annotation_Promise{p.Pipeline} } + +// Node_annotation_Promise is a wrapper for a Node_annotation promised by a client call. +type Node_annotation_Promise struct{ *capnp.Pipeline } + +func (p Node_annotation_Promise) Struct() (Node_annotation, error) { + s, err := p.Pipeline.Struct() + return Node_annotation{s}, err +} + +func (p Node_annotation_Promise) Type() Type_Promise { + return Type_Promise{Pipeline: p.Pipeline.GetPipeline(3)} +} + +type Node_Parameter struct{ capnp.Struct } + +// Node_Parameter_TypeID is the unique identifier for the type Node_Parameter. 
+const Node_Parameter_TypeID = 0xb9521bccf10fa3b1 + +func NewNode_Parameter(s *capnp.Segment) (Node_Parameter, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Node_Parameter{st}, err +} + +func NewRootNode_Parameter(s *capnp.Segment) (Node_Parameter, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Node_Parameter{st}, err +} + +func ReadRootNode_Parameter(msg *capnp.Message) (Node_Parameter, error) { + root, err := msg.RootPtr() + return Node_Parameter{root.Struct()}, err +} + +func (s Node_Parameter) Name() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s Node_Parameter) HasName() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Node_Parameter) NameBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s Node_Parameter) SetName(v string) error { + return s.Struct.SetText(0, v) +} + +// Node_Parameter_List is a list of Node_Parameter. +type Node_Parameter_List struct{ capnp.List } + +// NewNode_Parameter creates a new list of Node_Parameter. +func NewNode_Parameter_List(s *capnp.Segment, sz int32) (Node_Parameter_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return Node_Parameter_List{l}, err +} + +func (s Node_Parameter_List) At(i int) Node_Parameter { return Node_Parameter{s.List.Struct(i)} } + +func (s Node_Parameter_List) Set(i int, v Node_Parameter) error { return s.List.SetStruct(i, v.Struct) } + +// Node_Parameter_Promise is a wrapper for a Node_Parameter promised by a client call. +type Node_Parameter_Promise struct{ *capnp.Pipeline } + +func (p Node_Parameter_Promise) Struct() (Node_Parameter, error) { + s, err := p.Pipeline.Struct() + return Node_Parameter{s}, err +} + +type Node_NestedNode struct{ capnp.Struct } + +// Node_NestedNode_TypeID is the unique identifier for the type Node_NestedNode. +const Node_NestedNode_TypeID = 0xdebf55bbfa0fc242 + +func NewNode_NestedNode(s *capnp.Segment) (Node_NestedNode, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}) + return Node_NestedNode{st}, err +} + +func NewRootNode_NestedNode(s *capnp.Segment) (Node_NestedNode, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}) + return Node_NestedNode{st}, err +} + +func ReadRootNode_NestedNode(msg *capnp.Message) (Node_NestedNode, error) { + root, err := msg.RootPtr() + return Node_NestedNode{root.Struct()}, err +} + +func (s Node_NestedNode) Name() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s Node_NestedNode) HasName() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Node_NestedNode) NameBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s Node_NestedNode) SetName(v string) error { + return s.Struct.SetText(0, v) +} + +func (s Node_NestedNode) Id() uint64 { + return s.Struct.Uint64(0) +} + +func (s Node_NestedNode) SetId(v uint64) { + s.Struct.SetUint64(0, v) +} + +// Node_NestedNode_List is a list of Node_NestedNode. +type Node_NestedNode_List struct{ capnp.List } + +// NewNode_NestedNode creates a new list of Node_NestedNode. 
+func NewNode_NestedNode_List(s *capnp.Segment, sz int32) (Node_NestedNode_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}, sz) + return Node_NestedNode_List{l}, err +} + +func (s Node_NestedNode_List) At(i int) Node_NestedNode { return Node_NestedNode{s.List.Struct(i)} } + +func (s Node_NestedNode_List) Set(i int, v Node_NestedNode) error { + return s.List.SetStruct(i, v.Struct) +} + +// Node_NestedNode_Promise is a wrapper for a Node_NestedNode promised by a client call. +type Node_NestedNode_Promise struct{ *capnp.Pipeline } + +func (p Node_NestedNode_Promise) Struct() (Node_NestedNode, error) { + s, err := p.Pipeline.Struct() + return Node_NestedNode{s}, err +} + +type Field struct{ capnp.Struct } +type Field_slot Field +type Field_group Field +type Field_ordinal Field +type Field_Which uint16 + +const ( + Field_Which_slot Field_Which = 0 + Field_Which_group Field_Which = 1 +) + +func (w Field_Which) String() string { + const s = "slotgroup" + switch w { + case Field_Which_slot: + return s[0:4] + case Field_Which_group: + return s[4:9] + + } + return "Field_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +type Field_ordinal_Which uint16 + +const ( + Field_ordinal_Which_implicit Field_ordinal_Which = 0 + Field_ordinal_Which_explicit Field_ordinal_Which = 1 +) + +func (w Field_ordinal_Which) String() string { + const s = "implicitexplicit" + switch w { + case Field_ordinal_Which_implicit: + return s[0:8] + case Field_ordinal_Which_explicit: + return s[8:16] + + } + return "Field_ordinal_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +// Field_TypeID is the unique identifier for the type Field. +const Field_TypeID = 0x9aad50a41f4af45f + +func NewField(s *capnp.Segment) (Field, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 24, PointerCount: 4}) + return Field{st}, err +} + +func NewRootField(s *capnp.Segment) (Field, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 24, PointerCount: 4}) + return Field{st}, err +} + +func ReadRootField(msg *capnp.Message) (Field, error) { + root, err := msg.RootPtr() + return Field{root.Struct()}, err +} + +func (s Field) Which() Field_Which { + return Field_Which(s.Struct.Uint16(8)) +} +func (s Field) Name() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s Field) HasName() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Field) NameBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s Field) SetName(v string) error { + return s.Struct.SetText(0, v) +} + +func (s Field) CodeOrder() uint16 { + return s.Struct.Uint16(0) +} + +func (s Field) SetCodeOrder(v uint16) { + s.Struct.SetUint16(0, v) +} + +func (s Field) Annotations() (Annotation_List, error) { + p, err := s.Struct.Ptr(1) + return Annotation_List{List: p.List()}, err +} + +func (s Field) HasAnnotations() bool { + p, err := s.Struct.Ptr(1) + return p.IsValid() || err != nil +} + +func (s Field) SetAnnotations(v Annotation_List) error { + return s.Struct.SetPtr(1, v.List.ToPtr()) +} + +// NewAnnotations sets the annotations field to a newly +// allocated Annotation_List, preferring placement in s's segment. 
+func (s Field) NewAnnotations(n int32) (Annotation_List, error) { + l, err := NewAnnotation_List(s.Struct.Segment(), n) + if err != nil { + return Annotation_List{}, err + } + err = s.Struct.SetPtr(1, l.List.ToPtr()) + return l, err +} + +func (s Field) DiscriminantValue() uint16 { + return s.Struct.Uint16(2) ^ 65535 +} + +func (s Field) SetDiscriminantValue(v uint16) { + s.Struct.SetUint16(2, v^65535) +} + +func (s Field) Slot() Field_slot { return Field_slot(s) } + +func (s Field) SetSlot() { + s.Struct.SetUint16(8, 0) +} + +func (s Field_slot) Offset() uint32 { + return s.Struct.Uint32(4) +} + +func (s Field_slot) SetOffset(v uint32) { + s.Struct.SetUint32(4, v) +} + +func (s Field_slot) Type() (Type, error) { + p, err := s.Struct.Ptr(2) + return Type{Struct: p.Struct()}, err +} + +func (s Field_slot) HasType() bool { + p, err := s.Struct.Ptr(2) + return p.IsValid() || err != nil +} + +func (s Field_slot) SetType(v Type) error { + return s.Struct.SetPtr(2, v.Struct.ToPtr()) +} + +// NewType sets the type field to a newly +// allocated Type struct, preferring placement in s's segment. +func (s Field_slot) NewType() (Type, error) { + ss, err := NewType(s.Struct.Segment()) + if err != nil { + return Type{}, err + } + err = s.Struct.SetPtr(2, ss.Struct.ToPtr()) + return ss, err +} + +func (s Field_slot) DefaultValue() (Value, error) { + p, err := s.Struct.Ptr(3) + return Value{Struct: p.Struct()}, err +} + +func (s Field_slot) HasDefaultValue() bool { + p, err := s.Struct.Ptr(3) + return p.IsValid() || err != nil +} + +func (s Field_slot) SetDefaultValue(v Value) error { + return s.Struct.SetPtr(3, v.Struct.ToPtr()) +} + +// NewDefaultValue sets the defaultValue field to a newly +// allocated Value struct, preferring placement in s's segment. +func (s Field_slot) NewDefaultValue() (Value, error) { + ss, err := NewValue(s.Struct.Segment()) + if err != nil { + return Value{}, err + } + err = s.Struct.SetPtr(3, ss.Struct.ToPtr()) + return ss, err +} + +func (s Field_slot) HadExplicitDefault() bool { + return s.Struct.Bit(128) +} + +func (s Field_slot) SetHadExplicitDefault(v bool) { + s.Struct.SetBit(128, v) +} + +func (s Field) Group() Field_group { return Field_group(s) } + +func (s Field) SetGroup() { + s.Struct.SetUint16(8, 1) +} + +func (s Field_group) TypeId() uint64 { + return s.Struct.Uint64(16) +} + +func (s Field_group) SetTypeId(v uint64) { + s.Struct.SetUint64(16, v) +} + +func (s Field) Ordinal() Field_ordinal { return Field_ordinal(s) } + +func (s Field_ordinal) Which() Field_ordinal_Which { + return Field_ordinal_Which(s.Struct.Uint16(10)) +} +func (s Field_ordinal) SetImplicit() { + s.Struct.SetUint16(10, 0) + +} + +func (s Field_ordinal) Explicit() uint16 { + return s.Struct.Uint16(12) +} + +func (s Field_ordinal) SetExplicit(v uint16) { + s.Struct.SetUint16(10, 1) + s.Struct.SetUint16(12, v) +} + +// Field_List is a list of Field. +type Field_List struct{ capnp.List } + +// NewField creates a new list of Field. +func NewField_List(s *capnp.Segment, sz int32) (Field_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 24, PointerCount: 4}, sz) + return Field_List{l}, err +} + +func (s Field_List) At(i int) Field { return Field{s.List.Struct(i)} } + +func (s Field_List) Set(i int, v Field) error { return s.List.SetStruct(i, v.Struct) } + +// Field_Promise is a wrapper for a Field promised by a client call. 
+type Field_Promise struct{ *capnp.Pipeline } + +func (p Field_Promise) Struct() (Field, error) { + s, err := p.Pipeline.Struct() + return Field{s}, err +} + +func (p Field_Promise) Slot() Field_slot_Promise { return Field_slot_Promise{p.Pipeline} } + +// Field_slot_Promise is a wrapper for a Field_slot promised by a client call. +type Field_slot_Promise struct{ *capnp.Pipeline } + +func (p Field_slot_Promise) Struct() (Field_slot, error) { + s, err := p.Pipeline.Struct() + return Field_slot{s}, err +} + +func (p Field_slot_Promise) Type() Type_Promise { + return Type_Promise{Pipeline: p.Pipeline.GetPipeline(2)} +} + +func (p Field_slot_Promise) DefaultValue() Value_Promise { + return Value_Promise{Pipeline: p.Pipeline.GetPipeline(3)} +} + +func (p Field_Promise) Group() Field_group_Promise { return Field_group_Promise{p.Pipeline} } + +// Field_group_Promise is a wrapper for a Field_group promised by a client call. +type Field_group_Promise struct{ *capnp.Pipeline } + +func (p Field_group_Promise) Struct() (Field_group, error) { + s, err := p.Pipeline.Struct() + return Field_group{s}, err +} + +func (p Field_Promise) Ordinal() Field_ordinal_Promise { return Field_ordinal_Promise{p.Pipeline} } + +// Field_ordinal_Promise is a wrapper for a Field_ordinal promised by a client call. +type Field_ordinal_Promise struct{ *capnp.Pipeline } + +func (p Field_ordinal_Promise) Struct() (Field_ordinal, error) { + s, err := p.Pipeline.Struct() + return Field_ordinal{s}, err +} + +type Enumerant struct{ capnp.Struct } + +// Enumerant_TypeID is the unique identifier for the type Enumerant. +const Enumerant_TypeID = 0x978a7cebdc549a4d + +func NewEnumerant(s *capnp.Segment) (Enumerant, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}) + return Enumerant{st}, err +} + +func NewRootEnumerant(s *capnp.Segment) (Enumerant, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}) + return Enumerant{st}, err +} + +func ReadRootEnumerant(msg *capnp.Message) (Enumerant, error) { + root, err := msg.RootPtr() + return Enumerant{root.Struct()}, err +} + +func (s Enumerant) Name() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s Enumerant) HasName() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Enumerant) NameBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s Enumerant) SetName(v string) error { + return s.Struct.SetText(0, v) +} + +func (s Enumerant) CodeOrder() uint16 { + return s.Struct.Uint16(0) +} + +func (s Enumerant) SetCodeOrder(v uint16) { + s.Struct.SetUint16(0, v) +} + +func (s Enumerant) Annotations() (Annotation_List, error) { + p, err := s.Struct.Ptr(1) + return Annotation_List{List: p.List()}, err +} + +func (s Enumerant) HasAnnotations() bool { + p, err := s.Struct.Ptr(1) + return p.IsValid() || err != nil +} + +func (s Enumerant) SetAnnotations(v Annotation_List) error { + return s.Struct.SetPtr(1, v.List.ToPtr()) +} + +// NewAnnotations sets the annotations field to a newly +// allocated Annotation_List, preferring placement in s's segment. +func (s Enumerant) NewAnnotations(n int32) (Annotation_List, error) { + l, err := NewAnnotation_List(s.Struct.Segment(), n) + if err != nil { + return Annotation_List{}, err + } + err = s.Struct.SetPtr(1, l.List.ToPtr()) + return l, err +} + +// Enumerant_List is a list of Enumerant. +type Enumerant_List struct{ capnp.List } + +// NewEnumerant creates a new list of Enumerant. 
+func NewEnumerant_List(s *capnp.Segment, sz int32) (Enumerant_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}, sz) + return Enumerant_List{l}, err +} + +func (s Enumerant_List) At(i int) Enumerant { return Enumerant{s.List.Struct(i)} } + +func (s Enumerant_List) Set(i int, v Enumerant) error { return s.List.SetStruct(i, v.Struct) } + +// Enumerant_Promise is a wrapper for a Enumerant promised by a client call. +type Enumerant_Promise struct{ *capnp.Pipeline } + +func (p Enumerant_Promise) Struct() (Enumerant, error) { + s, err := p.Pipeline.Struct() + return Enumerant{s}, err +} + +type Superclass struct{ capnp.Struct } + +// Superclass_TypeID is the unique identifier for the type Superclass. +const Superclass_TypeID = 0xa9962a9ed0a4d7f8 + +func NewSuperclass(s *capnp.Segment) (Superclass, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}) + return Superclass{st}, err +} + +func NewRootSuperclass(s *capnp.Segment) (Superclass, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}) + return Superclass{st}, err +} + +func ReadRootSuperclass(msg *capnp.Message) (Superclass, error) { + root, err := msg.RootPtr() + return Superclass{root.Struct()}, err +} + +func (s Superclass) Id() uint64 { + return s.Struct.Uint64(0) +} + +func (s Superclass) SetId(v uint64) { + s.Struct.SetUint64(0, v) +} + +func (s Superclass) Brand() (Brand, error) { + p, err := s.Struct.Ptr(0) + return Brand{Struct: p.Struct()}, err +} + +func (s Superclass) HasBrand() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Superclass) SetBrand(v Brand) error { + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewBrand sets the brand field to a newly +// allocated Brand struct, preferring placement in s's segment. +func (s Superclass) NewBrand() (Brand, error) { + ss, err := NewBrand(s.Struct.Segment()) + if err != nil { + return Brand{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +// Superclass_List is a list of Superclass. +type Superclass_List struct{ capnp.List } + +// NewSuperclass creates a new list of Superclass. +func NewSuperclass_List(s *capnp.Segment, sz int32) (Superclass_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}, sz) + return Superclass_List{l}, err +} + +func (s Superclass_List) At(i int) Superclass { return Superclass{s.List.Struct(i)} } + +func (s Superclass_List) Set(i int, v Superclass) error { return s.List.SetStruct(i, v.Struct) } + +// Superclass_Promise is a wrapper for a Superclass promised by a client call. +type Superclass_Promise struct{ *capnp.Pipeline } + +func (p Superclass_Promise) Struct() (Superclass, error) { + s, err := p.Pipeline.Struct() + return Superclass{s}, err +} + +func (p Superclass_Promise) Brand() Brand_Promise { + return Brand_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +type Method struct{ capnp.Struct } + +// Method_TypeID is the unique identifier for the type Method. 
+const Method_TypeID = 0x9500cce23b334d80 + +func NewMethod(s *capnp.Segment) (Method, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 24, PointerCount: 5}) + return Method{st}, err +} + +func NewRootMethod(s *capnp.Segment) (Method, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 24, PointerCount: 5}) + return Method{st}, err +} + +func ReadRootMethod(msg *capnp.Message) (Method, error) { + root, err := msg.RootPtr() + return Method{root.Struct()}, err +} + +func (s Method) Name() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s Method) HasName() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Method) NameBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s Method) SetName(v string) error { + return s.Struct.SetText(0, v) +} + +func (s Method) CodeOrder() uint16 { + return s.Struct.Uint16(0) +} + +func (s Method) SetCodeOrder(v uint16) { + s.Struct.SetUint16(0, v) +} + +func (s Method) ImplicitParameters() (Node_Parameter_List, error) { + p, err := s.Struct.Ptr(4) + return Node_Parameter_List{List: p.List()}, err +} + +func (s Method) HasImplicitParameters() bool { + p, err := s.Struct.Ptr(4) + return p.IsValid() || err != nil +} + +func (s Method) SetImplicitParameters(v Node_Parameter_List) error { + return s.Struct.SetPtr(4, v.List.ToPtr()) +} + +// NewImplicitParameters sets the implicitParameters field to a newly +// allocated Node_Parameter_List, preferring placement in s's segment. +func (s Method) NewImplicitParameters(n int32) (Node_Parameter_List, error) { + l, err := NewNode_Parameter_List(s.Struct.Segment(), n) + if err != nil { + return Node_Parameter_List{}, err + } + err = s.Struct.SetPtr(4, l.List.ToPtr()) + return l, err +} + +func (s Method) ParamStructType() uint64 { + return s.Struct.Uint64(8) +} + +func (s Method) SetParamStructType(v uint64) { + s.Struct.SetUint64(8, v) +} + +func (s Method) ParamBrand() (Brand, error) { + p, err := s.Struct.Ptr(2) + return Brand{Struct: p.Struct()}, err +} + +func (s Method) HasParamBrand() bool { + p, err := s.Struct.Ptr(2) + return p.IsValid() || err != nil +} + +func (s Method) SetParamBrand(v Brand) error { + return s.Struct.SetPtr(2, v.Struct.ToPtr()) +} + +// NewParamBrand sets the paramBrand field to a newly +// allocated Brand struct, preferring placement in s's segment. +func (s Method) NewParamBrand() (Brand, error) { + ss, err := NewBrand(s.Struct.Segment()) + if err != nil { + return Brand{}, err + } + err = s.Struct.SetPtr(2, ss.Struct.ToPtr()) + return ss, err +} + +func (s Method) ResultStructType() uint64 { + return s.Struct.Uint64(16) +} + +func (s Method) SetResultStructType(v uint64) { + s.Struct.SetUint64(16, v) +} + +func (s Method) ResultBrand() (Brand, error) { + p, err := s.Struct.Ptr(3) + return Brand{Struct: p.Struct()}, err +} + +func (s Method) HasResultBrand() bool { + p, err := s.Struct.Ptr(3) + return p.IsValid() || err != nil +} + +func (s Method) SetResultBrand(v Brand) error { + return s.Struct.SetPtr(3, v.Struct.ToPtr()) +} + +// NewResultBrand sets the resultBrand field to a newly +// allocated Brand struct, preferring placement in s's segment. 
+func (s Method) NewResultBrand() (Brand, error) { + ss, err := NewBrand(s.Struct.Segment()) + if err != nil { + return Brand{}, err + } + err = s.Struct.SetPtr(3, ss.Struct.ToPtr()) + return ss, err +} + +func (s Method) Annotations() (Annotation_List, error) { + p, err := s.Struct.Ptr(1) + return Annotation_List{List: p.List()}, err +} + +func (s Method) HasAnnotations() bool { + p, err := s.Struct.Ptr(1) + return p.IsValid() || err != nil +} + +func (s Method) SetAnnotations(v Annotation_List) error { + return s.Struct.SetPtr(1, v.List.ToPtr()) +} + +// NewAnnotations sets the annotations field to a newly +// allocated Annotation_List, preferring placement in s's segment. +func (s Method) NewAnnotations(n int32) (Annotation_List, error) { + l, err := NewAnnotation_List(s.Struct.Segment(), n) + if err != nil { + return Annotation_List{}, err + } + err = s.Struct.SetPtr(1, l.List.ToPtr()) + return l, err +} + +// Method_List is a list of Method. +type Method_List struct{ capnp.List } + +// NewMethod creates a new list of Method. +func NewMethod_List(s *capnp.Segment, sz int32) (Method_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 24, PointerCount: 5}, sz) + return Method_List{l}, err +} + +func (s Method_List) At(i int) Method { return Method{s.List.Struct(i)} } + +func (s Method_List) Set(i int, v Method) error { return s.List.SetStruct(i, v.Struct) } + +// Method_Promise is a wrapper for a Method promised by a client call. +type Method_Promise struct{ *capnp.Pipeline } + +func (p Method_Promise) Struct() (Method, error) { + s, err := p.Pipeline.Struct() + return Method{s}, err +} + +func (p Method_Promise) ParamBrand() Brand_Promise { + return Brand_Promise{Pipeline: p.Pipeline.GetPipeline(2)} +} + +func (p Method_Promise) ResultBrand() Brand_Promise { + return Brand_Promise{Pipeline: p.Pipeline.GetPipeline(3)} +} + +type Type struct{ capnp.Struct } +type Type_list Type +type Type_enum Type +type Type_structType Type +type Type_interface Type +type Type_anyPointer Type +type Type_anyPointer_unconstrained Type +type Type_anyPointer_parameter Type +type Type_anyPointer_implicitMethodParameter Type +type Type_Which uint16 + +const ( + Type_Which_void Type_Which = 0 + Type_Which_bool Type_Which = 1 + Type_Which_int8 Type_Which = 2 + Type_Which_int16 Type_Which = 3 + Type_Which_int32 Type_Which = 4 + Type_Which_int64 Type_Which = 5 + Type_Which_uint8 Type_Which = 6 + Type_Which_uint16 Type_Which = 7 + Type_Which_uint32 Type_Which = 8 + Type_Which_uint64 Type_Which = 9 + Type_Which_float32 Type_Which = 10 + Type_Which_float64 Type_Which = 11 + Type_Which_text Type_Which = 12 + Type_Which_data Type_Which = 13 + Type_Which_list Type_Which = 14 + Type_Which_enum Type_Which = 15 + Type_Which_structType Type_Which = 16 + Type_Which_interface Type_Which = 17 + Type_Which_anyPointer Type_Which = 18 +) + +func (w Type_Which) String() string { + const s = "voidboolint8int16int32int64uint8uint16uint32uint64float32float64textdatalistenumstructTypeinterfaceanyPointer" + switch w { + case Type_Which_void: + return s[0:4] + case Type_Which_bool: + return s[4:8] + case Type_Which_int8: + return s[8:12] + case Type_Which_int16: + return s[12:17] + case Type_Which_int32: + return s[17:22] + case Type_Which_int64: + return s[22:27] + case Type_Which_uint8: + return s[27:32] + case Type_Which_uint16: + return s[32:38] + case Type_Which_uint32: + return s[38:44] + case Type_Which_uint64: + return s[44:50] + case Type_Which_float32: + return s[50:57] + case Type_Which_float64: + 
return s[57:64] + case Type_Which_text: + return s[64:68] + case Type_Which_data: + return s[68:72] + case Type_Which_list: + return s[72:76] + case Type_Which_enum: + return s[76:80] + case Type_Which_structType: + return s[80:90] + case Type_Which_interface: + return s[90:99] + case Type_Which_anyPointer: + return s[99:109] + + } + return "Type_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +type Type_anyPointer_Which uint16 + +const ( + Type_anyPointer_Which_unconstrained Type_anyPointer_Which = 0 + Type_anyPointer_Which_parameter Type_anyPointer_Which = 1 + Type_anyPointer_Which_implicitMethodParameter Type_anyPointer_Which = 2 +) + +func (w Type_anyPointer_Which) String() string { + const s = "unconstrainedparameterimplicitMethodParameter" + switch w { + case Type_anyPointer_Which_unconstrained: + return s[0:13] + case Type_anyPointer_Which_parameter: + return s[13:22] + case Type_anyPointer_Which_implicitMethodParameter: + return s[22:45] + + } + return "Type_anyPointer_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +type Type_anyPointer_unconstrained_Which uint16 + +const ( + Type_anyPointer_unconstrained_Which_anyKind Type_anyPointer_unconstrained_Which = 0 + Type_anyPointer_unconstrained_Which_struct Type_anyPointer_unconstrained_Which = 1 + Type_anyPointer_unconstrained_Which_list Type_anyPointer_unconstrained_Which = 2 + Type_anyPointer_unconstrained_Which_capability Type_anyPointer_unconstrained_Which = 3 +) + +func (w Type_anyPointer_unconstrained_Which) String() string { + const s = "anyKindstructlistcapability" + switch w { + case Type_anyPointer_unconstrained_Which_anyKind: + return s[0:7] + case Type_anyPointer_unconstrained_Which_struct: + return s[7:13] + case Type_anyPointer_unconstrained_Which_list: + return s[13:17] + case Type_anyPointer_unconstrained_Which_capability: + return s[17:27] + + } + return "Type_anyPointer_unconstrained_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +// Type_TypeID is the unique identifier for the type Type. 
+const Type_TypeID = 0xd07378ede1f9cc60 + +func NewType(s *capnp.Segment) (Type, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 24, PointerCount: 1}) + return Type{st}, err +} + +func NewRootType(s *capnp.Segment) (Type, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 24, PointerCount: 1}) + return Type{st}, err +} + +func ReadRootType(msg *capnp.Message) (Type, error) { + root, err := msg.RootPtr() + return Type{root.Struct()}, err +} + +func (s Type) Which() Type_Which { + return Type_Which(s.Struct.Uint16(0)) +} +func (s Type) SetVoid() { + s.Struct.SetUint16(0, 0) + +} + +func (s Type) SetBool() { + s.Struct.SetUint16(0, 1) + +} + +func (s Type) SetInt8() { + s.Struct.SetUint16(0, 2) + +} + +func (s Type) SetInt16() { + s.Struct.SetUint16(0, 3) + +} + +func (s Type) SetInt32() { + s.Struct.SetUint16(0, 4) + +} + +func (s Type) SetInt64() { + s.Struct.SetUint16(0, 5) + +} + +func (s Type) SetUint8() { + s.Struct.SetUint16(0, 6) + +} + +func (s Type) SetUint16() { + s.Struct.SetUint16(0, 7) + +} + +func (s Type) SetUint32() { + s.Struct.SetUint16(0, 8) + +} + +func (s Type) SetUint64() { + s.Struct.SetUint16(0, 9) + +} + +func (s Type) SetFloat32() { + s.Struct.SetUint16(0, 10) + +} + +func (s Type) SetFloat64() { + s.Struct.SetUint16(0, 11) + +} + +func (s Type) SetText() { + s.Struct.SetUint16(0, 12) + +} + +func (s Type) SetData() { + s.Struct.SetUint16(0, 13) + +} + +func (s Type) List() Type_list { return Type_list(s) } + +func (s Type) SetList() { + s.Struct.SetUint16(0, 14) +} + +func (s Type_list) ElementType() (Type, error) { + p, err := s.Struct.Ptr(0) + return Type{Struct: p.Struct()}, err +} + +func (s Type_list) HasElementType() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Type_list) SetElementType(v Type) error { + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewElementType sets the elementType field to a newly +// allocated Type struct, preferring placement in s's segment. +func (s Type_list) NewElementType() (Type, error) { + ss, err := NewType(s.Struct.Segment()) + if err != nil { + return Type{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +func (s Type) Enum() Type_enum { return Type_enum(s) } + +func (s Type) SetEnum() { + s.Struct.SetUint16(0, 15) +} + +func (s Type_enum) TypeId() uint64 { + return s.Struct.Uint64(8) +} + +func (s Type_enum) SetTypeId(v uint64) { + s.Struct.SetUint64(8, v) +} + +func (s Type_enum) Brand() (Brand, error) { + p, err := s.Struct.Ptr(0) + return Brand{Struct: p.Struct()}, err +} + +func (s Type_enum) HasBrand() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Type_enum) SetBrand(v Brand) error { + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewBrand sets the brand field to a newly +// allocated Brand struct, preferring placement in s's segment. 
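+// For example, to mark a Type as an enum and record which enum it points at
+// (an illustrative sketch only; t stands for a Type obtained from NewType,
+// and the id value is a placeholder):
+//
+//	t.SetEnum()
+//	t.Enum().SetTypeId(0x1234)
+//	brand, _ := t.Enum().NewBrand()
+//	_ = brand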
+func (s Type_enum) NewBrand() (Brand, error) { + ss, err := NewBrand(s.Struct.Segment()) + if err != nil { + return Brand{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +func (s Type) StructType() Type_structType { return Type_structType(s) } + +func (s Type) SetStructType() { + s.Struct.SetUint16(0, 16) +} + +func (s Type_structType) TypeId() uint64 { + return s.Struct.Uint64(8) +} + +func (s Type_structType) SetTypeId(v uint64) { + s.Struct.SetUint64(8, v) +} + +func (s Type_structType) Brand() (Brand, error) { + p, err := s.Struct.Ptr(0) + return Brand{Struct: p.Struct()}, err +} + +func (s Type_structType) HasBrand() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Type_structType) SetBrand(v Brand) error { + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewBrand sets the brand field to a newly +// allocated Brand struct, preferring placement in s's segment. +func (s Type_structType) NewBrand() (Brand, error) { + ss, err := NewBrand(s.Struct.Segment()) + if err != nil { + return Brand{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +func (s Type) Interface() Type_interface { return Type_interface(s) } + +func (s Type) SetInterface() { + s.Struct.SetUint16(0, 17) +} + +func (s Type_interface) TypeId() uint64 { + return s.Struct.Uint64(8) +} + +func (s Type_interface) SetTypeId(v uint64) { + s.Struct.SetUint64(8, v) +} + +func (s Type_interface) Brand() (Brand, error) { + p, err := s.Struct.Ptr(0) + return Brand{Struct: p.Struct()}, err +} + +func (s Type_interface) HasBrand() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Type_interface) SetBrand(v Brand) error { + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewBrand sets the brand field to a newly +// allocated Brand struct, preferring placement in s's segment. 
+func (s Type_interface) NewBrand() (Brand, error) { + ss, err := NewBrand(s.Struct.Segment()) + if err != nil { + return Brand{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +func (s Type) AnyPointer() Type_anyPointer { return Type_anyPointer(s) } + +func (s Type) SetAnyPointer() { + s.Struct.SetUint16(0, 18) +} + +func (s Type_anyPointer) Which() Type_anyPointer_Which { + return Type_anyPointer_Which(s.Struct.Uint16(8)) +} +func (s Type_anyPointer) Unconstrained() Type_anyPointer_unconstrained { + return Type_anyPointer_unconstrained(s) +} + +func (s Type_anyPointer) SetUnconstrained() { + s.Struct.SetUint16(8, 0) +} + +func (s Type_anyPointer_unconstrained) Which() Type_anyPointer_unconstrained_Which { + return Type_anyPointer_unconstrained_Which(s.Struct.Uint16(10)) +} +func (s Type_anyPointer_unconstrained) SetAnyKind() { + s.Struct.SetUint16(10, 0) + +} + +func (s Type_anyPointer_unconstrained) SetStruct() { + s.Struct.SetUint16(10, 1) + +} + +func (s Type_anyPointer_unconstrained) SetList() { + s.Struct.SetUint16(10, 2) + +} + +func (s Type_anyPointer_unconstrained) SetCapability() { + s.Struct.SetUint16(10, 3) + +} + +func (s Type_anyPointer) Parameter() Type_anyPointer_parameter { return Type_anyPointer_parameter(s) } + +func (s Type_anyPointer) SetParameter() { + s.Struct.SetUint16(8, 1) +} + +func (s Type_anyPointer_parameter) ScopeId() uint64 { + return s.Struct.Uint64(16) +} + +func (s Type_anyPointer_parameter) SetScopeId(v uint64) { + s.Struct.SetUint64(16, v) +} + +func (s Type_anyPointer_parameter) ParameterIndex() uint16 { + return s.Struct.Uint16(10) +} + +func (s Type_anyPointer_parameter) SetParameterIndex(v uint16) { + s.Struct.SetUint16(10, v) +} + +func (s Type_anyPointer) ImplicitMethodParameter() Type_anyPointer_implicitMethodParameter { + return Type_anyPointer_implicitMethodParameter(s) +} + +func (s Type_anyPointer) SetImplicitMethodParameter() { + s.Struct.SetUint16(8, 2) +} + +func (s Type_anyPointer_implicitMethodParameter) ParameterIndex() uint16 { + return s.Struct.Uint16(10) +} + +func (s Type_anyPointer_implicitMethodParameter) SetParameterIndex(v uint16) { + s.Struct.SetUint16(10, v) +} + +// Type_List is a list of Type. +type Type_List struct{ capnp.List } + +// NewType creates a new list of Type. +func NewType_List(s *capnp.Segment, sz int32) (Type_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 24, PointerCount: 1}, sz) + return Type_List{l}, err +} + +func (s Type_List) At(i int) Type { return Type{s.List.Struct(i)} } + +func (s Type_List) Set(i int, v Type) error { return s.List.SetStruct(i, v.Struct) } + +// Type_Promise is a wrapper for a Type promised by a client call. +type Type_Promise struct{ *capnp.Pipeline } + +func (p Type_Promise) Struct() (Type, error) { + s, err := p.Pipeline.Struct() + return Type{s}, err +} + +func (p Type_Promise) List() Type_list_Promise { return Type_list_Promise{p.Pipeline} } + +// Type_list_Promise is a wrapper for a Type_list promised by a client call. +type Type_list_Promise struct{ *capnp.Pipeline } + +func (p Type_list_Promise) Struct() (Type_list, error) { + s, err := p.Pipeline.Struct() + return Type_list{s}, err +} + +func (p Type_list_Promise) ElementType() Type_Promise { + return Type_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +func (p Type_Promise) Enum() Type_enum_Promise { return Type_enum_Promise{p.Pipeline} } + +// Type_enum_Promise is a wrapper for a Type_enum promised by a client call. 
+type Type_enum_Promise struct{ *capnp.Pipeline } + +func (p Type_enum_Promise) Struct() (Type_enum, error) { + s, err := p.Pipeline.Struct() + return Type_enum{s}, err +} + +func (p Type_enum_Promise) Brand() Brand_Promise { + return Brand_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +func (p Type_Promise) StructType() Type_structType_Promise { return Type_structType_Promise{p.Pipeline} } + +// Type_structType_Promise is a wrapper for a Type_structType promised by a client call. +type Type_structType_Promise struct{ *capnp.Pipeline } + +func (p Type_structType_Promise) Struct() (Type_structType, error) { + s, err := p.Pipeline.Struct() + return Type_structType{s}, err +} + +func (p Type_structType_Promise) Brand() Brand_Promise { + return Brand_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +func (p Type_Promise) Interface() Type_interface_Promise { return Type_interface_Promise{p.Pipeline} } + +// Type_interface_Promise is a wrapper for a Type_interface promised by a client call. +type Type_interface_Promise struct{ *capnp.Pipeline } + +func (p Type_interface_Promise) Struct() (Type_interface, error) { + s, err := p.Pipeline.Struct() + return Type_interface{s}, err +} + +func (p Type_interface_Promise) Brand() Brand_Promise { + return Brand_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +func (p Type_Promise) AnyPointer() Type_anyPointer_Promise { return Type_anyPointer_Promise{p.Pipeline} } + +// Type_anyPointer_Promise is a wrapper for a Type_anyPointer promised by a client call. +type Type_anyPointer_Promise struct{ *capnp.Pipeline } + +func (p Type_anyPointer_Promise) Struct() (Type_anyPointer, error) { + s, err := p.Pipeline.Struct() + return Type_anyPointer{s}, err +} + +func (p Type_anyPointer_Promise) Unconstrained() Type_anyPointer_unconstrained_Promise { + return Type_anyPointer_unconstrained_Promise{p.Pipeline} +} + +// Type_anyPointer_unconstrained_Promise is a wrapper for a Type_anyPointer_unconstrained promised by a client call. +type Type_anyPointer_unconstrained_Promise struct{ *capnp.Pipeline } + +func (p Type_anyPointer_unconstrained_Promise) Struct() (Type_anyPointer_unconstrained, error) { + s, err := p.Pipeline.Struct() + return Type_anyPointer_unconstrained{s}, err +} + +func (p Type_anyPointer_Promise) Parameter() Type_anyPointer_parameter_Promise { + return Type_anyPointer_parameter_Promise{p.Pipeline} +} + +// Type_anyPointer_parameter_Promise is a wrapper for a Type_anyPointer_parameter promised by a client call. +type Type_anyPointer_parameter_Promise struct{ *capnp.Pipeline } + +func (p Type_anyPointer_parameter_Promise) Struct() (Type_anyPointer_parameter, error) { + s, err := p.Pipeline.Struct() + return Type_anyPointer_parameter{s}, err +} + +func (p Type_anyPointer_Promise) ImplicitMethodParameter() Type_anyPointer_implicitMethodParameter_Promise { + return Type_anyPointer_implicitMethodParameter_Promise{p.Pipeline} +} + +// Type_anyPointer_implicitMethodParameter_Promise is a wrapper for a Type_anyPointer_implicitMethodParameter promised by a client call. +type Type_anyPointer_implicitMethodParameter_Promise struct{ *capnp.Pipeline } + +func (p Type_anyPointer_implicitMethodParameter_Promise) Struct() (Type_anyPointer_implicitMethodParameter, error) { + s, err := p.Pipeline.Struct() + return Type_anyPointer_implicitMethodParameter{s}, err +} + +type Brand struct{ capnp.Struct } + +// Brand_TypeID is the unique identifier for the type Brand. 
+const Brand_TypeID = 0x903455f06065422b + +func NewBrand(s *capnp.Segment) (Brand, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Brand{st}, err +} + +func NewRootBrand(s *capnp.Segment) (Brand, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Brand{st}, err +} + +func ReadRootBrand(msg *capnp.Message) (Brand, error) { + root, err := msg.RootPtr() + return Brand{root.Struct()}, err +} + +func (s Brand) Scopes() (Brand_Scope_List, error) { + p, err := s.Struct.Ptr(0) + return Brand_Scope_List{List: p.List()}, err +} + +func (s Brand) HasScopes() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Brand) SetScopes(v Brand_Scope_List) error { + return s.Struct.SetPtr(0, v.List.ToPtr()) +} + +// NewScopes sets the scopes field to a newly +// allocated Brand_Scope_List, preferring placement in s's segment. +func (s Brand) NewScopes(n int32) (Brand_Scope_List, error) { + l, err := NewBrand_Scope_List(s.Struct.Segment(), n) + if err != nil { + return Brand_Scope_List{}, err + } + err = s.Struct.SetPtr(0, l.List.ToPtr()) + return l, err +} + +// Brand_List is a list of Brand. +type Brand_List struct{ capnp.List } + +// NewBrand creates a new list of Brand. +func NewBrand_List(s *capnp.Segment, sz int32) (Brand_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return Brand_List{l}, err +} + +func (s Brand_List) At(i int) Brand { return Brand{s.List.Struct(i)} } + +func (s Brand_List) Set(i int, v Brand) error { return s.List.SetStruct(i, v.Struct) } + +// Brand_Promise is a wrapper for a Brand promised by a client call. +type Brand_Promise struct{ *capnp.Pipeline } + +func (p Brand_Promise) Struct() (Brand, error) { + s, err := p.Pipeline.Struct() + return Brand{s}, err +} + +type Brand_Scope struct{ capnp.Struct } +type Brand_Scope_Which uint16 + +const ( + Brand_Scope_Which_bind Brand_Scope_Which = 0 + Brand_Scope_Which_inherit Brand_Scope_Which = 1 +) + +func (w Brand_Scope_Which) String() string { + const s = "bindinherit" + switch w { + case Brand_Scope_Which_bind: + return s[0:4] + case Brand_Scope_Which_inherit: + return s[4:11] + + } + return "Brand_Scope_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +// Brand_Scope_TypeID is the unique identifier for the type Brand_Scope. 
+const Brand_Scope_TypeID = 0xabd73485a9636bc9 + +func NewBrand_Scope(s *capnp.Segment) (Brand_Scope, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 1}) + return Brand_Scope{st}, err +} + +func NewRootBrand_Scope(s *capnp.Segment) (Brand_Scope, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 1}) + return Brand_Scope{st}, err +} + +func ReadRootBrand_Scope(msg *capnp.Message) (Brand_Scope, error) { + root, err := msg.RootPtr() + return Brand_Scope{root.Struct()}, err +} + +func (s Brand_Scope) Which() Brand_Scope_Which { + return Brand_Scope_Which(s.Struct.Uint16(8)) +} +func (s Brand_Scope) ScopeId() uint64 { + return s.Struct.Uint64(0) +} + +func (s Brand_Scope) SetScopeId(v uint64) { + s.Struct.SetUint64(0, v) +} + +func (s Brand_Scope) Bind() (Brand_Binding_List, error) { + p, err := s.Struct.Ptr(0) + return Brand_Binding_List{List: p.List()}, err +} + +func (s Brand_Scope) HasBind() bool { + if s.Struct.Uint16(8) != 0 { + return false + } + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Brand_Scope) SetBind(v Brand_Binding_List) error { + s.Struct.SetUint16(8, 0) + return s.Struct.SetPtr(0, v.List.ToPtr()) +} + +// NewBind sets the bind field to a newly +// allocated Brand_Binding_List, preferring placement in s's segment. +func (s Brand_Scope) NewBind(n int32) (Brand_Binding_List, error) { + s.Struct.SetUint16(8, 0) + l, err := NewBrand_Binding_List(s.Struct.Segment(), n) + if err != nil { + return Brand_Binding_List{}, err + } + err = s.Struct.SetPtr(0, l.List.ToPtr()) + return l, err +} + +func (s Brand_Scope) SetInherit() { + s.Struct.SetUint16(8, 1) + +} + +// Brand_Scope_List is a list of Brand_Scope. +type Brand_Scope_List struct{ capnp.List } + +// NewBrand_Scope creates a new list of Brand_Scope. +func NewBrand_Scope_List(s *capnp.Segment, sz int32) (Brand_Scope_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 16, PointerCount: 1}, sz) + return Brand_Scope_List{l}, err +} + +func (s Brand_Scope_List) At(i int) Brand_Scope { return Brand_Scope{s.List.Struct(i)} } + +func (s Brand_Scope_List) Set(i int, v Brand_Scope) error { return s.List.SetStruct(i, v.Struct) } + +// Brand_Scope_Promise is a wrapper for a Brand_Scope promised by a client call. +type Brand_Scope_Promise struct{ *capnp.Pipeline } + +func (p Brand_Scope_Promise) Struct() (Brand_Scope, error) { + s, err := p.Pipeline.Struct() + return Brand_Scope{s}, err +} + +type Brand_Binding struct{ capnp.Struct } +type Brand_Binding_Which uint16 + +const ( + Brand_Binding_Which_unbound Brand_Binding_Which = 0 + Brand_Binding_Which_type Brand_Binding_Which = 1 +) + +func (w Brand_Binding_Which) String() string { + const s = "unboundtype" + switch w { + case Brand_Binding_Which_unbound: + return s[0:7] + case Brand_Binding_Which_type: + return s[7:11] + + } + return "Brand_Binding_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +// Brand_Binding_TypeID is the unique identifier for the type Brand_Binding. 
+const Brand_Binding_TypeID = 0xc863cd16969ee7fc + +func NewBrand_Binding(s *capnp.Segment) (Brand_Binding, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}) + return Brand_Binding{st}, err +} + +func NewRootBrand_Binding(s *capnp.Segment) (Brand_Binding, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}) + return Brand_Binding{st}, err +} + +func ReadRootBrand_Binding(msg *capnp.Message) (Brand_Binding, error) { + root, err := msg.RootPtr() + return Brand_Binding{root.Struct()}, err +} + +func (s Brand_Binding) Which() Brand_Binding_Which { + return Brand_Binding_Which(s.Struct.Uint16(0)) +} +func (s Brand_Binding) SetUnbound() { + s.Struct.SetUint16(0, 0) + +} + +func (s Brand_Binding) Type() (Type, error) { + p, err := s.Struct.Ptr(0) + return Type{Struct: p.Struct()}, err +} + +func (s Brand_Binding) HasType() bool { + if s.Struct.Uint16(0) != 1 { + return false + } + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Brand_Binding) SetType(v Type) error { + s.Struct.SetUint16(0, 1) + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewType sets the type field to a newly +// allocated Type struct, preferring placement in s's segment. +func (s Brand_Binding) NewType() (Type, error) { + s.Struct.SetUint16(0, 1) + ss, err := NewType(s.Struct.Segment()) + if err != nil { + return Type{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +// Brand_Binding_List is a list of Brand_Binding. +type Brand_Binding_List struct{ capnp.List } + +// NewBrand_Binding creates a new list of Brand_Binding. +func NewBrand_Binding_List(s *capnp.Segment, sz int32) (Brand_Binding_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}, sz) + return Brand_Binding_List{l}, err +} + +func (s Brand_Binding_List) At(i int) Brand_Binding { return Brand_Binding{s.List.Struct(i)} } + +func (s Brand_Binding_List) Set(i int, v Brand_Binding) error { return s.List.SetStruct(i, v.Struct) } + +// Brand_Binding_Promise is a wrapper for a Brand_Binding promised by a client call. 
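+// Its Type accessor pipelines into pointer field 0 of the promised struct
+// (via GetPipeline), so the bound Type can itself be traversed before the
+// remote call resolves.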
+type Brand_Binding_Promise struct{ *capnp.Pipeline } + +func (p Brand_Binding_Promise) Struct() (Brand_Binding, error) { + s, err := p.Pipeline.Struct() + return Brand_Binding{s}, err +} + +func (p Brand_Binding_Promise) Type() Type_Promise { + return Type_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +type Value struct{ capnp.Struct } +type Value_Which uint16 + +const ( + Value_Which_void Value_Which = 0 + Value_Which_bool Value_Which = 1 + Value_Which_int8 Value_Which = 2 + Value_Which_int16 Value_Which = 3 + Value_Which_int32 Value_Which = 4 + Value_Which_int64 Value_Which = 5 + Value_Which_uint8 Value_Which = 6 + Value_Which_uint16 Value_Which = 7 + Value_Which_uint32 Value_Which = 8 + Value_Which_uint64 Value_Which = 9 + Value_Which_float32 Value_Which = 10 + Value_Which_float64 Value_Which = 11 + Value_Which_text Value_Which = 12 + Value_Which_data Value_Which = 13 + Value_Which_list Value_Which = 14 + Value_Which_enum Value_Which = 15 + Value_Which_structValue Value_Which = 16 + Value_Which_interface Value_Which = 17 + Value_Which_anyPointer Value_Which = 18 +) + +func (w Value_Which) String() string { + const s = "voidboolint8int16int32int64uint8uint16uint32uint64float32float64textdatalistenumstructValueinterfaceanyPointer" + switch w { + case Value_Which_void: + return s[0:4] + case Value_Which_bool: + return s[4:8] + case Value_Which_int8: + return s[8:12] + case Value_Which_int16: + return s[12:17] + case Value_Which_int32: + return s[17:22] + case Value_Which_int64: + return s[22:27] + case Value_Which_uint8: + return s[27:32] + case Value_Which_uint16: + return s[32:38] + case Value_Which_uint32: + return s[38:44] + case Value_Which_uint64: + return s[44:50] + case Value_Which_float32: + return s[50:57] + case Value_Which_float64: + return s[57:64] + case Value_Which_text: + return s[64:68] + case Value_Which_data: + return s[68:72] + case Value_Which_list: + return s[72:76] + case Value_Which_enum: + return s[76:80] + case Value_Which_structValue: + return s[80:91] + case Value_Which_interface: + return s[91:100] + case Value_Which_anyPointer: + return s[100:110] + + } + return "Value_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +// Value_TypeID is the unique identifier for the type Value. 
+const Value_TypeID = 0xce23dcd2d7b00c9b + +func NewValue(s *capnp.Segment) (Value, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 1}) + return Value{st}, err +} + +func NewRootValue(s *capnp.Segment) (Value, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 1}) + return Value{st}, err +} + +func ReadRootValue(msg *capnp.Message) (Value, error) { + root, err := msg.RootPtr() + return Value{root.Struct()}, err +} + +func (s Value) Which() Value_Which { + return Value_Which(s.Struct.Uint16(0)) +} +func (s Value) SetVoid() { + s.Struct.SetUint16(0, 0) + +} + +func (s Value) Bool() bool { + return s.Struct.Bit(16) +} + +func (s Value) SetBool(v bool) { + s.Struct.SetUint16(0, 1) + s.Struct.SetBit(16, v) +} + +func (s Value) Int8() int8 { + return int8(s.Struct.Uint8(2)) +} + +func (s Value) SetInt8(v int8) { + s.Struct.SetUint16(0, 2) + s.Struct.SetUint8(2, uint8(v)) +} + +func (s Value) Int16() int16 { + return int16(s.Struct.Uint16(2)) +} + +func (s Value) SetInt16(v int16) { + s.Struct.SetUint16(0, 3) + s.Struct.SetUint16(2, uint16(v)) +} + +func (s Value) Int32() int32 { + return int32(s.Struct.Uint32(4)) +} + +func (s Value) SetInt32(v int32) { + s.Struct.SetUint16(0, 4) + s.Struct.SetUint32(4, uint32(v)) +} + +func (s Value) Int64() int64 { + return int64(s.Struct.Uint64(8)) +} + +func (s Value) SetInt64(v int64) { + s.Struct.SetUint16(0, 5) + s.Struct.SetUint64(8, uint64(v)) +} + +func (s Value) Uint8() uint8 { + return s.Struct.Uint8(2) +} + +func (s Value) SetUint8(v uint8) { + s.Struct.SetUint16(0, 6) + s.Struct.SetUint8(2, v) +} + +func (s Value) Uint16() uint16 { + return s.Struct.Uint16(2) +} + +func (s Value) SetUint16(v uint16) { + s.Struct.SetUint16(0, 7) + s.Struct.SetUint16(2, v) +} + +func (s Value) Uint32() uint32 { + return s.Struct.Uint32(4) +} + +func (s Value) SetUint32(v uint32) { + s.Struct.SetUint16(0, 8) + s.Struct.SetUint32(4, v) +} + +func (s Value) Uint64() uint64 { + return s.Struct.Uint64(8) +} + +func (s Value) SetUint64(v uint64) { + s.Struct.SetUint16(0, 9) + s.Struct.SetUint64(8, v) +} + +func (s Value) Float32() float32 { + return math.Float32frombits(s.Struct.Uint32(4)) +} + +func (s Value) SetFloat32(v float32) { + s.Struct.SetUint16(0, 10) + s.Struct.SetUint32(4, math.Float32bits(v)) +} + +func (s Value) Float64() float64 { + return math.Float64frombits(s.Struct.Uint64(8)) +} + +func (s Value) SetFloat64(v float64) { + s.Struct.SetUint16(0, 11) + s.Struct.SetUint64(8, math.Float64bits(v)) +} + +func (s Value) Text() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s Value) HasText() bool { + if s.Struct.Uint16(0) != 12 { + return false + } + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Value) TextBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s Value) SetText(v string) error { + s.Struct.SetUint16(0, 12) + return s.Struct.SetText(0, v) +} + +func (s Value) Data() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return []byte(p.Data()), err +} + +func (s Value) HasData() bool { + if s.Struct.Uint16(0) != 13 { + return false + } + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Value) SetData(v []byte) error { + s.Struct.SetUint16(0, 13) + return s.Struct.SetData(0, v) +} + +func (s Value) List() (capnp.Pointer, error) { + return s.Struct.Pointer(0) +} + +func (s Value) HasList() bool { + if s.Struct.Uint16(0) != 14 { + return false + } + p, err 
:= s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Value) ListPtr() (capnp.Ptr, error) { + return s.Struct.Ptr(0) +} + +func (s Value) SetList(v capnp.Pointer) error { + s.Struct.SetUint16(0, 14) + return s.Struct.SetPointer(0, v) +} + +func (s Value) SetListPtr(v capnp.Ptr) error { + s.Struct.SetUint16(0, 14) + return s.Struct.SetPtr(0, v) +} + +func (s Value) Enum() uint16 { + return s.Struct.Uint16(2) +} + +func (s Value) SetEnum(v uint16) { + s.Struct.SetUint16(0, 15) + s.Struct.SetUint16(2, v) +} + +func (s Value) StructValue() (capnp.Pointer, error) { + return s.Struct.Pointer(0) +} + +func (s Value) HasStructValue() bool { + if s.Struct.Uint16(0) != 16 { + return false + } + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Value) StructValuePtr() (capnp.Ptr, error) { + return s.Struct.Ptr(0) +} + +func (s Value) SetStructValue(v capnp.Pointer) error { + s.Struct.SetUint16(0, 16) + return s.Struct.SetPointer(0, v) +} + +func (s Value) SetStructValuePtr(v capnp.Ptr) error { + s.Struct.SetUint16(0, 16) + return s.Struct.SetPtr(0, v) +} + +func (s Value) SetInterface() { + s.Struct.SetUint16(0, 17) + +} + +func (s Value) AnyPointer() (capnp.Pointer, error) { + return s.Struct.Pointer(0) +} + +func (s Value) HasAnyPointer() bool { + if s.Struct.Uint16(0) != 18 { + return false + } + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Value) AnyPointerPtr() (capnp.Ptr, error) { + return s.Struct.Ptr(0) +} + +func (s Value) SetAnyPointer(v capnp.Pointer) error { + s.Struct.SetUint16(0, 18) + return s.Struct.SetPointer(0, v) +} + +func (s Value) SetAnyPointerPtr(v capnp.Ptr) error { + s.Struct.SetUint16(0, 18) + return s.Struct.SetPtr(0, v) +} + +// Value_List is a list of Value. +type Value_List struct{ capnp.List } + +// NewValue creates a new list of Value. +func NewValue_List(s *capnp.Segment, sz int32) (Value_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 16, PointerCount: 1}, sz) + return Value_List{l}, err +} + +func (s Value_List) At(i int) Value { return Value{s.List.Struct(i)} } + +func (s Value_List) Set(i int, v Value) error { return s.List.SetStruct(i, v.Struct) } + +// Value_Promise is a wrapper for a Value promised by a client call. +type Value_Promise struct{ *capnp.Pipeline } + +func (p Value_Promise) Struct() (Value, error) { + s, err := p.Pipeline.Struct() + return Value{s}, err +} + +func (p Value_Promise) List() *capnp.Pipeline { + return p.Pipeline.GetPipeline(0) +} + +func (p Value_Promise) StructValue() *capnp.Pipeline { + return p.Pipeline.GetPipeline(0) +} + +func (p Value_Promise) AnyPointer() *capnp.Pipeline { + return p.Pipeline.GetPipeline(0) +} + +type Annotation struct{ capnp.Struct } + +// Annotation_TypeID is the unique identifier for the type Annotation. 
+const Annotation_TypeID = 0xf1c8950dab257542 + +func NewAnnotation(s *capnp.Segment) (Annotation, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}) + return Annotation{st}, err +} + +func NewRootAnnotation(s *capnp.Segment) (Annotation, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}) + return Annotation{st}, err +} + +func ReadRootAnnotation(msg *capnp.Message) (Annotation, error) { + root, err := msg.RootPtr() + return Annotation{root.Struct()}, err +} + +func (s Annotation) Id() uint64 { + return s.Struct.Uint64(0) +} + +func (s Annotation) SetId(v uint64) { + s.Struct.SetUint64(0, v) +} + +func (s Annotation) Brand() (Brand, error) { + p, err := s.Struct.Ptr(1) + return Brand{Struct: p.Struct()}, err +} + +func (s Annotation) HasBrand() bool { + p, err := s.Struct.Ptr(1) + return p.IsValid() || err != nil +} + +func (s Annotation) SetBrand(v Brand) error { + return s.Struct.SetPtr(1, v.Struct.ToPtr()) +} + +// NewBrand sets the brand field to a newly +// allocated Brand struct, preferring placement in s's segment. +func (s Annotation) NewBrand() (Brand, error) { + ss, err := NewBrand(s.Struct.Segment()) + if err != nil { + return Brand{}, err + } + err = s.Struct.SetPtr(1, ss.Struct.ToPtr()) + return ss, err +} + +func (s Annotation) Value() (Value, error) { + p, err := s.Struct.Ptr(0) + return Value{Struct: p.Struct()}, err +} + +func (s Annotation) HasValue() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s Annotation) SetValue(v Value) error { + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewValue sets the value field to a newly +// allocated Value struct, preferring placement in s's segment. +func (s Annotation) NewValue() (Value, error) { + ss, err := NewValue(s.Struct.Segment()) + if err != nil { + return Value{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +// Annotation_List is a list of Annotation. +type Annotation_List struct{ capnp.List } + +// NewAnnotation creates a new list of Annotation. +func NewAnnotation_List(s *capnp.Segment, sz int32) (Annotation_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}, sz) + return Annotation_List{l}, err +} + +func (s Annotation_List) At(i int) Annotation { return Annotation{s.List.Struct(i)} } + +func (s Annotation_List) Set(i int, v Annotation) error { return s.List.SetStruct(i, v.Struct) } + +// Annotation_Promise is a wrapper for a Annotation promised by a client call. +type Annotation_Promise struct{ *capnp.Pipeline } + +func (p Annotation_Promise) Struct() (Annotation, error) { + s, err := p.Pipeline.Struct() + return Annotation{s}, err +} + +func (p Annotation_Promise) Brand() Brand_Promise { + return Brand_Promise{Pipeline: p.Pipeline.GetPipeline(1)} +} + +func (p Annotation_Promise) Value() Value_Promise { + return Value_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +type ElementSize uint16 + +// ElementSize_TypeID is the unique identifier for the type ElementSize. +const ElementSize_TypeID = 0xd1958f7dba521926 + +// Values of ElementSize. +const ( + ElementSize_empty ElementSize = 0 + ElementSize_bit ElementSize = 1 + ElementSize_byte ElementSize = 2 + ElementSize_twoBytes ElementSize = 3 + ElementSize_fourBytes ElementSize = 4 + ElementSize_eightBytes ElementSize = 5 + ElementSize_pointer ElementSize = 6 + ElementSize_inlineComposite ElementSize = 7 +) + +// String returns the enum's constant name. 
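+// Values outside the declared range yield the empty string;
+// ElementSizeFromString below performs the reverse lookup and returns the
+// zero value (ElementSize_empty) for unrecognized names.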
+func (c ElementSize) String() string { + switch c { + case ElementSize_empty: + return "empty" + case ElementSize_bit: + return "bit" + case ElementSize_byte: + return "byte" + case ElementSize_twoBytes: + return "twoBytes" + case ElementSize_fourBytes: + return "fourBytes" + case ElementSize_eightBytes: + return "eightBytes" + case ElementSize_pointer: + return "pointer" + case ElementSize_inlineComposite: + return "inlineComposite" + + default: + return "" + } +} + +// ElementSizeFromString returns the enum value with a name, +// or the zero value if there's no such value. +func ElementSizeFromString(c string) ElementSize { + switch c { + case "empty": + return ElementSize_empty + case "bit": + return ElementSize_bit + case "byte": + return ElementSize_byte + case "twoBytes": + return ElementSize_twoBytes + case "fourBytes": + return ElementSize_fourBytes + case "eightBytes": + return ElementSize_eightBytes + case "pointer": + return ElementSize_pointer + case "inlineComposite": + return ElementSize_inlineComposite + + default: + return 0 + } +} + +type ElementSize_List struct{ capnp.List } + +func NewElementSize_List(s *capnp.Segment, sz int32) (ElementSize_List, error) { + l, err := capnp.NewUInt16List(s, sz) + return ElementSize_List{l.List}, err +} + +func (l ElementSize_List) At(i int) ElementSize { + ul := capnp.UInt16List{List: l.List} + return ElementSize(ul.At(i)) +} + +func (l ElementSize_List) Set(i int, v ElementSize) { + ul := capnp.UInt16List{List: l.List} + ul.Set(i, uint16(v)) +} + +type CodeGeneratorRequest struct{ capnp.Struct } + +// CodeGeneratorRequest_TypeID is the unique identifier for the type CodeGeneratorRequest. +const CodeGeneratorRequest_TypeID = 0xbfc546f6210ad7ce + +func NewCodeGeneratorRequest(s *capnp.Segment) (CodeGeneratorRequest, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}) + return CodeGeneratorRequest{st}, err +} + +func NewRootCodeGeneratorRequest(s *capnp.Segment) (CodeGeneratorRequest, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}) + return CodeGeneratorRequest{st}, err +} + +func ReadRootCodeGeneratorRequest(msg *capnp.Message) (CodeGeneratorRequest, error) { + root, err := msg.RootPtr() + return CodeGeneratorRequest{root.Struct()}, err +} + +func (s CodeGeneratorRequest) Nodes() (Node_List, error) { + p, err := s.Struct.Ptr(0) + return Node_List{List: p.List()}, err +} + +func (s CodeGeneratorRequest) HasNodes() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s CodeGeneratorRequest) SetNodes(v Node_List) error { + return s.Struct.SetPtr(0, v.List.ToPtr()) +} + +// NewNodes sets the nodes field to a newly +// allocated Node_List, preferring placement in s's segment. 
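+// An illustrative construction sketch (seg stands for an existing
+// *capnp.Segment; error handling elided):
+//
+//	req, _ := NewRootCodeGeneratorRequest(seg)
+//	nodes, _ := req.NewNodes(1)
+//	_ = nodes.Len()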
+func (s CodeGeneratorRequest) NewNodes(n int32) (Node_List, error) { + l, err := NewNode_List(s.Struct.Segment(), n) + if err != nil { + return Node_List{}, err + } + err = s.Struct.SetPtr(0, l.List.ToPtr()) + return l, err +} + +func (s CodeGeneratorRequest) RequestedFiles() (CodeGeneratorRequest_RequestedFile_List, error) { + p, err := s.Struct.Ptr(1) + return CodeGeneratorRequest_RequestedFile_List{List: p.List()}, err +} + +func (s CodeGeneratorRequest) HasRequestedFiles() bool { + p, err := s.Struct.Ptr(1) + return p.IsValid() || err != nil +} + +func (s CodeGeneratorRequest) SetRequestedFiles(v CodeGeneratorRequest_RequestedFile_List) error { + return s.Struct.SetPtr(1, v.List.ToPtr()) +} + +// NewRequestedFiles sets the requestedFiles field to a newly +// allocated CodeGeneratorRequest_RequestedFile_List, preferring placement in s's segment. +func (s CodeGeneratorRequest) NewRequestedFiles(n int32) (CodeGeneratorRequest_RequestedFile_List, error) { + l, err := NewCodeGeneratorRequest_RequestedFile_List(s.Struct.Segment(), n) + if err != nil { + return CodeGeneratorRequest_RequestedFile_List{}, err + } + err = s.Struct.SetPtr(1, l.List.ToPtr()) + return l, err +} + +// CodeGeneratorRequest_List is a list of CodeGeneratorRequest. +type CodeGeneratorRequest_List struct{ capnp.List } + +// NewCodeGeneratorRequest creates a new list of CodeGeneratorRequest. +func NewCodeGeneratorRequest_List(s *capnp.Segment, sz int32) (CodeGeneratorRequest_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}, sz) + return CodeGeneratorRequest_List{l}, err +} + +func (s CodeGeneratorRequest_List) At(i int) CodeGeneratorRequest { + return CodeGeneratorRequest{s.List.Struct(i)} +} + +func (s CodeGeneratorRequest_List) Set(i int, v CodeGeneratorRequest) error { + return s.List.SetStruct(i, v.Struct) +} + +// CodeGeneratorRequest_Promise is a wrapper for a CodeGeneratorRequest promised by a client call. +type CodeGeneratorRequest_Promise struct{ *capnp.Pipeline } + +func (p CodeGeneratorRequest_Promise) Struct() (CodeGeneratorRequest, error) { + s, err := p.Pipeline.Struct() + return CodeGeneratorRequest{s}, err +} + +type CodeGeneratorRequest_RequestedFile struct{ capnp.Struct } + +// CodeGeneratorRequest_RequestedFile_TypeID is the unique identifier for the type CodeGeneratorRequest_RequestedFile. 
+const CodeGeneratorRequest_RequestedFile_TypeID = 0xcfea0eb02e810062 + +func NewCodeGeneratorRequest_RequestedFile(s *capnp.Segment) (CodeGeneratorRequest_RequestedFile, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}) + return CodeGeneratorRequest_RequestedFile{st}, err +} + +func NewRootCodeGeneratorRequest_RequestedFile(s *capnp.Segment) (CodeGeneratorRequest_RequestedFile, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}) + return CodeGeneratorRequest_RequestedFile{st}, err +} + +func ReadRootCodeGeneratorRequest_RequestedFile(msg *capnp.Message) (CodeGeneratorRequest_RequestedFile, error) { + root, err := msg.RootPtr() + return CodeGeneratorRequest_RequestedFile{root.Struct()}, err +} + +func (s CodeGeneratorRequest_RequestedFile) Id() uint64 { + return s.Struct.Uint64(0) +} + +func (s CodeGeneratorRequest_RequestedFile) SetId(v uint64) { + s.Struct.SetUint64(0, v) +} + +func (s CodeGeneratorRequest_RequestedFile) Filename() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s CodeGeneratorRequest_RequestedFile) HasFilename() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s CodeGeneratorRequest_RequestedFile) FilenameBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s CodeGeneratorRequest_RequestedFile) SetFilename(v string) error { + return s.Struct.SetText(0, v) +} + +func (s CodeGeneratorRequest_RequestedFile) Imports() (CodeGeneratorRequest_RequestedFile_Import_List, error) { + p, err := s.Struct.Ptr(1) + return CodeGeneratorRequest_RequestedFile_Import_List{List: p.List()}, err +} + +func (s CodeGeneratorRequest_RequestedFile) HasImports() bool { + p, err := s.Struct.Ptr(1) + return p.IsValid() || err != nil +} + +func (s CodeGeneratorRequest_RequestedFile) SetImports(v CodeGeneratorRequest_RequestedFile_Import_List) error { + return s.Struct.SetPtr(1, v.List.ToPtr()) +} + +// NewImports sets the imports field to a newly +// allocated CodeGeneratorRequest_RequestedFile_Import_List, preferring placement in s's segment. +func (s CodeGeneratorRequest_RequestedFile) NewImports(n int32) (CodeGeneratorRequest_RequestedFile_Import_List, error) { + l, err := NewCodeGeneratorRequest_RequestedFile_Import_List(s.Struct.Segment(), n) + if err != nil { + return CodeGeneratorRequest_RequestedFile_Import_List{}, err + } + err = s.Struct.SetPtr(1, l.List.ToPtr()) + return l, err +} + +// CodeGeneratorRequest_RequestedFile_List is a list of CodeGeneratorRequest_RequestedFile. +type CodeGeneratorRequest_RequestedFile_List struct{ capnp.List } + +// NewCodeGeneratorRequest_RequestedFile creates a new list of CodeGeneratorRequest_RequestedFile. +func NewCodeGeneratorRequest_RequestedFile_List(s *capnp.Segment, sz int32) (CodeGeneratorRequest_RequestedFile_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}, sz) + return CodeGeneratorRequest_RequestedFile_List{l}, err +} + +func (s CodeGeneratorRequest_RequestedFile_List) At(i int) CodeGeneratorRequest_RequestedFile { + return CodeGeneratorRequest_RequestedFile{s.List.Struct(i)} +} + +func (s CodeGeneratorRequest_RequestedFile_List) Set(i int, v CodeGeneratorRequest_RequestedFile) error { + return s.List.SetStruct(i, v.Struct) +} + +// CodeGeneratorRequest_RequestedFile_Promise is a wrapper for a CodeGeneratorRequest_RequestedFile promised by a client call. 
+type CodeGeneratorRequest_RequestedFile_Promise struct{ *capnp.Pipeline } + +func (p CodeGeneratorRequest_RequestedFile_Promise) Struct() (CodeGeneratorRequest_RequestedFile, error) { + s, err := p.Pipeline.Struct() + return CodeGeneratorRequest_RequestedFile{s}, err +} + +type CodeGeneratorRequest_RequestedFile_Import struct{ capnp.Struct } + +// CodeGeneratorRequest_RequestedFile_Import_TypeID is the unique identifier for the type CodeGeneratorRequest_RequestedFile_Import. +const CodeGeneratorRequest_RequestedFile_Import_TypeID = 0xae504193122357e5 + +func NewCodeGeneratorRequest_RequestedFile_Import(s *capnp.Segment) (CodeGeneratorRequest_RequestedFile_Import, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}) + return CodeGeneratorRequest_RequestedFile_Import{st}, err +} + +func NewRootCodeGeneratorRequest_RequestedFile_Import(s *capnp.Segment) (CodeGeneratorRequest_RequestedFile_Import, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}) + return CodeGeneratorRequest_RequestedFile_Import{st}, err +} + +func ReadRootCodeGeneratorRequest_RequestedFile_Import(msg *capnp.Message) (CodeGeneratorRequest_RequestedFile_Import, error) { + root, err := msg.RootPtr() + return CodeGeneratorRequest_RequestedFile_Import{root.Struct()}, err +} + +func (s CodeGeneratorRequest_RequestedFile_Import) Id() uint64 { + return s.Struct.Uint64(0) +} + +func (s CodeGeneratorRequest_RequestedFile_Import) SetId(v uint64) { + s.Struct.SetUint64(0, v) +} + +func (s CodeGeneratorRequest_RequestedFile_Import) Name() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s CodeGeneratorRequest_RequestedFile_Import) HasName() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s CodeGeneratorRequest_RequestedFile_Import) NameBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s CodeGeneratorRequest_RequestedFile_Import) SetName(v string) error { + return s.Struct.SetText(0, v) +} + +// CodeGeneratorRequest_RequestedFile_Import_List is a list of CodeGeneratorRequest_RequestedFile_Import. +type CodeGeneratorRequest_RequestedFile_Import_List struct{ capnp.List } + +// NewCodeGeneratorRequest_RequestedFile_Import creates a new list of CodeGeneratorRequest_RequestedFile_Import. +func NewCodeGeneratorRequest_RequestedFile_Import_List(s *capnp.Segment, sz int32) (CodeGeneratorRequest_RequestedFile_Import_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}, sz) + return CodeGeneratorRequest_RequestedFile_Import_List{l}, err +} + +func (s CodeGeneratorRequest_RequestedFile_Import_List) At(i int) CodeGeneratorRequest_RequestedFile_Import { + return CodeGeneratorRequest_RequestedFile_Import{s.List.Struct(i)} +} + +func (s CodeGeneratorRequest_RequestedFile_Import_List) Set(i int, v CodeGeneratorRequest_RequestedFile_Import) error { + return s.List.SetStruct(i, v.Struct) +} + +// CodeGeneratorRequest_RequestedFile_Import_Promise is a wrapper for a CodeGeneratorRequest_RequestedFile_Import promised by a client call. 
+type CodeGeneratorRequest_RequestedFile_Import_Promise struct{ *capnp.Pipeline } + +func (p CodeGeneratorRequest_RequestedFile_Import_Promise) Struct() (CodeGeneratorRequest_RequestedFile_Import, error) { + s, err := p.Pipeline.Struct() + return CodeGeneratorRequest_RequestedFile_Import{s}, err +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/strquote/BUILD.bazel b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/strquote/BUILD.bazel new file mode 100644 index 00000000..39b4cf74 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/strquote/BUILD.bazel @@ -0,0 +1,7 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["strquote.go"], + visibility = ["//:__subpackages__"], +) diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/strquote/strquote.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/strquote/strquote.go new file mode 100644 index 00000000..f6af09b9 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/internal/strquote/strquote.go @@ -0,0 +1,52 @@ +// Package strquote provides a function for formatting a string as a +// Cap'n Proto string literal. +package strquote + +// Append appends a Cap'n Proto string literal of s to buf. +func Append(buf []byte, s []byte) []byte { + buf = append(buf, '"') + last := 0 + for i, b := range s { + if !needsEscape(b) { + continue + } + buf = append(buf, s[last:i]...) + switch b { + case '\a': + buf = append(buf, '\\', 'a') + case '\b': + buf = append(buf, '\\', 'b') + case '\f': + buf = append(buf, '\\', 'f') + case '\n': + buf = append(buf, '\\', 'n') + case '\r': + buf = append(buf, '\\', 'r') + case '\t': + buf = append(buf, '\\', 't') + case '\v': + buf = append(buf, '\\', 'v') + case '\'': + buf = append(buf, '\\', '\'') + case '"': + buf = append(buf, '\\', '"') + case '\\': + buf = append(buf, '\\', '\\') + default: + buf = append(buf, '\\', 'x', hexDigit(b/16), hexDigit(b%16)) + } + last = i + 1 + } + buf = append(buf, s[last:]...) + buf = append(buf, '"') + return buf +} + +func needsEscape(b byte) bool { + return b < 0x20 || b >= 0x7f +} + +func hexDigit(b byte) byte { + const digits = "0123456789abcdef" + return digits[b] +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/list.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/list.go new file mode 100644 index 00000000..95a09492 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/list.go @@ -0,0 +1,1044 @@ +package capnp + +import ( + "errors" + "math" + "strconv" + + "zombiezen.com/go/capnproto2/internal/strquote" +) + +// A List is a reference to an array of values. +type List struct { + seg *Segment + off Address // at beginning of elements (past composite list tag word) + length int32 + size ObjectSize + depthLimit uint + flags listFlags +} + +// newPrimitiveList allocates a new list of primitive values, preferring placement in s. 
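+// It is the internal helper behind the typed primitive-list constructors;
+// callers outside this file normally go through those or through
+// NewCompositeList below. A rough sketch, assuming a freshly allocated
+// single-segment message:
+//
+//	_, seg, _ := NewMessage(SingleSegment(nil))
+//	l, _ := NewCompositeList(seg, ObjectSize{DataSize: 8, PointerCount: 2}, 4)
+//	_ = l.Len() // 4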
+func newPrimitiveList(s *Segment, sz Size, n int32) (List, error) { + total, ok := sz.times(n) + if !ok { + return List{}, errOverflow + } + s, addr, err := alloc(s, total) + if err != nil { + return List{}, err + } + return List{ + seg: s, + off: addr, + length: n, + size: ObjectSize{DataSize: sz}, + depthLimit: maxDepth, + }, nil +} + +// NewCompositeList creates a new composite list, preferring placement +// in s. +func NewCompositeList(s *Segment, sz ObjectSize, n int32) (List, error) { + if !sz.isValid() { + return List{}, errObjectSize + } + sz.DataSize = sz.DataSize.padToWord() + total, ok := sz.totalSize().times(n) + if !ok || total > maxSize-wordSize { + return List{}, errOverflow + } + s, addr, err := alloc(s, wordSize+total) + if err != nil { + return List{}, err + } + // Add tag word + s.writeRawPointer(addr, rawStructPointer(pointerOffset(n), sz)) + return List{ + seg: s, + off: addr + Address(wordSize), + length: n, + size: sz, + flags: isCompositeList, + depthLimit: maxDepth, + }, nil +} + +// ToList converts p to a List. +// +// Deprecated: Use Ptr.List. +func ToList(p Pointer) List { + return toPtr(p).List() +} + +// ToListDefault attempts to convert p into a list, reading the default +// value from def if p is not a list. +// +// Deprecated: Use Ptr.ListDefault. +func ToListDefault(p Pointer, def []byte) (List, error) { + return toPtr(p).ListDefault(def) +} + +// ToPtr converts the list to a generic pointer. +func (p List) ToPtr() Ptr { + return Ptr{ + seg: p.seg, + off: p.off, + lenOrCap: uint32(p.length), + size: p.size, + depthLimit: p.depthLimit, + flags: listPtrFlag(p.flags), + } +} + +// Segment returns the segment this pointer references. +func (p List) Segment() *Segment { + return p.seg +} + +// IsValid returns whether the list is valid. +func (p List) IsValid() bool { + return p.seg != nil +} + +// HasData reports whether the list's total size is non-zero. +func (p List) HasData() bool { + sz, ok := p.size.totalSize().times(p.length) + if !ok { + return false + } + return sz > 0 +} + +// readSize returns the list's size for the purposes of read limit +// accounting. +func (p List) readSize() Size { + if p.seg == nil { + return 0 + } + e := p.size.totalSize() + if e == 0 { + e = wordSize + } + sz, ok := e.times(p.length) + if !ok { + return maxSize + } + return sz +} + +// allocSize returns the list's size for the purpose of copying the list +// to a different message. +func (p List) allocSize() Size { + if p.seg == nil { + return 0 + } + if p.flags&isBitList != 0 { + return Size((p.length + 7) / 8) + } + sz, _ := p.size.totalSize().times(p.length) // size has already been validated + if p.flags&isCompositeList == 0 { + return sz + } + return sz + wordSize +} + +// raw returns the equivalent raw list pointer with a zero offset. 
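+// The encoding is chosen from p.size: composite lists encode the total word
+// count (length times words per element), bit lists use the 1-bit element
+// code, a bare pointer list (PointerCount == 1, DataSize == 0) uses the
+// pointer code, and plain data lists map DataSize 0/1/2/4/8 to the
+// corresponding element codes; any other shape panics with errListSize.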
+func (p List) raw() rawPointer { + if p.seg == nil { + return 0 + } + if p.flags&isCompositeList != 0 { + return rawListPointer(0, compositeList, p.length*p.size.totalWordCount()) + } + if p.flags&isBitList != 0 { + return rawListPointer(0, bit1List, p.length) + } + if p.size.PointerCount == 1 && p.size.DataSize == 0 { + return rawListPointer(0, pointerList, p.length) + } + if p.size.PointerCount != 0 { + panic(errListSize) + } + switch p.size.DataSize { + case 0: + return rawListPointer(0, voidList, p.length) + case 1: + return rawListPointer(0, byte1List, p.length) + case 2: + return rawListPointer(0, byte2List, p.length) + case 4: + return rawListPointer(0, byte4List, p.length) + case 8: + return rawListPointer(0, byte8List, p.length) + default: + panic(errListSize) + } +} + +func (p List) underlying() Pointer { + return p +} + +// Address returns the address the pointer references. +// +// Deprecated: The return value is not well-defined. Use SamePtr if you +// need to check whether two pointers refer to the same object. +func (p List) Address() Address { + return p.off +} + +// Len returns the length of the list. +func (p List) Len() int { + if p.seg == nil { + return 0 + } + return int(p.length) +} + +// primitiveElem returns the address of the segment data for a list element. +// Calling this on a bit list returns an error. +func (p List) primitiveElem(i int, expectedSize ObjectSize) (Address, error) { + if p.seg == nil || i < 0 || i >= int(p.length) { + // This is programmer error, not input error. + panic(errOutOfBounds) + } + if p.flags&isBitList != 0 || p.flags&isCompositeList == 0 && p.size != expectedSize || p.flags&isCompositeList != 0 && (p.size.DataSize < expectedSize.DataSize || p.size.PointerCount < expectedSize.PointerCount) { + return 0, errElementSize + } + addr, ok := p.off.element(int32(i), p.size.totalSize()) + if !ok { + return 0, errOverflow + } + return addr, nil +} + +// Struct returns the i'th element as a struct. +func (p List) Struct(i int) Struct { + if p.seg == nil || i < 0 || i >= int(p.length) { + // This is programmer error, not input error. + panic(errOutOfBounds) + } + if p.flags&isBitList != 0 { + return Struct{} + } + addr, ok := p.off.element(int32(i), p.size.totalSize()) + if !ok { + return Struct{} + } + return Struct{ + seg: p.seg, + off: addr, + size: p.size, + flags: isListMember, + depthLimit: p.depthLimit - 1, + } +} + +// SetStruct set the i'th element to the value in s. +func (p List) SetStruct(i int, s Struct) error { + if p.flags&isBitList != 0 { + return errBitListStruct + } + return copyStruct(p.Struct(i), s) +} + +// A BitList is a reference to a list of booleans. +type BitList struct{ List } + +// NewBitList creates a new bit list, preferring placement in s. +func NewBitList(s *Segment, n int32) (BitList, error) { + s, addr, err := alloc(s, Size(int64(n+7)/8)) + if err != nil { + return BitList{}, err + } + return BitList{List{ + seg: s, + off: addr, + length: n, + flags: isBitList, + depthLimit: maxDepth, + }}, nil +} + +// At returns the i'th bit. +func (p BitList) At(i int) bool { + if p.seg == nil || i < 0 || i >= int(p.length) { + // This is programmer error, not input error. + panic(errOutOfBounds) + } + if p.flags&isBitList == 0 { + return false + } + bit := BitOffset(i) + addr := p.off.addOffset(bit.offset()) + return p.seg.readUint8(addr)&bit.mask() != 0 +} + +// Set sets the i'th bit to v. 
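An illustrative sketch (not part of the vendored sources) of the BitList API above: allocate a message, create a ten-element bit list, set a couple of bits, and print it. Error returns are ignored for brevity.

package main

import (
	"fmt"

	capnp "zombiezen.com/go/capnproto2"
)

func main() {
	_, seg, _ := capnp.NewMessage(capnp.SingleSegment(nil))
	bits, _ := capnp.NewBitList(seg, 10)
	bits.Set(0, true)
	bits.Set(9, true)
	fmt.Println(bits) // [true, false, false, ..., false, true]
}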
+func (p BitList) Set(i int, v bool) { + if p.seg == nil || i < 0 || i >= int(p.length) { + // This is programmer error, not input error. + panic(errOutOfBounds) + } + if p.flags&isBitList == 0 { + // Again, programmer error. Should have used NewBitList. + panic(errElementSize) + } + bit := BitOffset(i) + addr := p.off.addOffset(bit.offset()) + b := p.seg.slice(addr, 1) + if v { + b[0] |= bit.mask() + } else { + b[0] &^= bit.mask() + } +} + +// String returns the list in Cap'n Proto schema format (e.g. "[true, false]"). +func (p BitList) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < p.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + if p.At(i) { + buf = append(buf, "true"...) + } else { + buf = append(buf, "false"...) + } + } + buf = append(buf, ']') + return string(buf) +} + +// A PointerList is a reference to an array of pointers. +type PointerList struct{ List } + +// NewPointerList allocates a new list of pointers, preferring placement in s. +func NewPointerList(s *Segment, n int32) (PointerList, error) { + total, ok := wordSize.times(n) + if !ok { + return PointerList{}, errOverflow + } + s, addr, err := alloc(s, total) + if err != nil { + return PointerList{}, err + } + return PointerList{List{ + seg: s, + off: addr, + length: n, + size: ObjectSize{PointerCount: 1}, + depthLimit: maxDepth, + }}, nil +} + +// At returns the i'th pointer in the list. +// +// Deprecated: Use PtrAt. +func (p PointerList) At(i int) (Pointer, error) { + pi, err := p.PtrAt(i) + return pi.toPointer(), err +} + +// PtrAt returns the i'th pointer in the list. +func (p PointerList) PtrAt(i int) (Ptr, error) { + addr, err := p.primitiveElem(i, ObjectSize{PointerCount: 1}) + if err != nil { + return Ptr{}, err + } + return p.seg.readPtr(addr, p.depthLimit) +} + +// Set sets the i'th pointer in the list to v. +// +// Deprecated: Use SetPtr. +func (p PointerList) Set(i int, v Pointer) error { + return p.SetPtr(i, toPtr(v)) +} + +// SetPtr sets the i'th pointer in the list to v. +func (p PointerList) SetPtr(i int, v Ptr) error { + addr, err := p.primitiveElem(i, ObjectSize{PointerCount: 1}) + if err != nil { + return err + } + return p.seg.writePtr(addr, v, false) +} + +// TextList is an array of pointers to strings. +type TextList struct{ List } + +// NewTextList allocates a new list of text pointers, preferring placement in s. +func NewTextList(s *Segment, n int32) (TextList, error) { + pl, err := NewPointerList(s, n) + if err != nil { + return TextList{}, err + } + return TextList{pl.List}, nil +} + +// At returns the i'th string in the list. +func (l TextList) At(i int) (string, error) { + addr, err := l.primitiveElem(i, ObjectSize{PointerCount: 1}) + if err != nil { + return "", err + } + p, err := l.seg.readPtr(addr, l.depthLimit) + if err != nil { + return "", err + } + return p.Text(), nil +} + +// BytesAt returns the i'th element in the list as a byte slice. +// The underlying array of the slice is the segment data. +func (l TextList) BytesAt(i int) ([]byte, error) { + addr, err := l.primitiveElem(i, ObjectSize{PointerCount: 1}) + if err != nil { + return nil, err + } + p, err := l.seg.readPtr(addr, l.depthLimit) + if err != nil { + return nil, err + } + return p.TextBytes(), nil +} + +// Set sets the i'th string in the list to v. 
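An illustrative sketch (not part of the vendored sources) of TextList: each element is a pointer to a NUL-terminated text value, written element by element with Set. Errors are ignored for brevity.

package main

import (
	"fmt"

	capnp "zombiezen.com/go/capnproto2"
)

func main() {
	_, seg, _ := capnp.NewMessage(capnp.SingleSegment(nil))
	names, _ := capnp.NewTextList(seg, 2)
	_ = names.Set(0, "alpha")
	_ = names.Set(1, "beta")
	first, _ := names.At(0)
	fmt.Println(first, names) // alpha ["alpha", "beta"]
}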
+func (l TextList) Set(i int, v string) error { + addr, err := l.primitiveElem(i, ObjectSize{PointerCount: 1}) + if err != nil { + return err + } + if v == "" { + return l.seg.writePtr(addr, Ptr{}, false) + } + p, err := NewText(l.seg, v) + if err != nil { + return err + } + return l.seg.writePtr(addr, p.List.ToPtr(), false) +} + +// String returns the list in Cap'n Proto schema format (e.g. `["foo", "bar"]`). +func (l TextList) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + s, err := l.BytesAt(i) + if err != nil { + buf = append(buf, ""...) + continue + } + buf = strquote.Append(buf, s) + } + buf = append(buf, ']') + return string(buf) +} + +// DataList is an array of pointers to data. +type DataList struct{ List } + +// NewDataList allocates a new list of data pointers, preferring placement in s. +func NewDataList(s *Segment, n int32) (DataList, error) { + pl, err := NewPointerList(s, n) + if err != nil { + return DataList{}, err + } + return DataList{pl.List}, nil +} + +// At returns the i'th data in the list. +func (l DataList) At(i int) ([]byte, error) { + addr, err := l.primitiveElem(i, ObjectSize{PointerCount: 1}) + if err != nil { + return nil, err + } + p, err := l.seg.readPtr(addr, l.depthLimit) + if err != nil { + return nil, err + } + return p.Data(), nil +} + +// Set sets the i'th data in the list to v. +func (l DataList) Set(i int, v []byte) error { + addr, err := l.primitiveElem(i, ObjectSize{PointerCount: 1}) + if err != nil { + return err + } + if len(v) == 0 { + return l.seg.writePtr(addr, Ptr{}, false) + } + p, err := NewData(l.seg, v) + if err != nil { + return err + } + return l.seg.writePtr(addr, p.List.ToPtr(), false) +} + +// String returns the list in Cap'n Proto schema format (e.g. `["foo", "bar"]`). +func (l DataList) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + s, err := l.At(i) + if err != nil { + buf = append(buf, ""...) + continue + } + buf = strquote.Append(buf, s) + } + buf = append(buf, ']') + return string(buf) +} + +// A VoidList is a list of zero-sized elements. +type VoidList struct{ List } + +// NewVoidList creates a list of voids. No allocation is performed; +// s is only used for Segment()'s return value. +func NewVoidList(s *Segment, n int32) VoidList { + return VoidList{List{ + seg: s, + length: n, + depthLimit: maxDepth, + }} +} + +// String returns the list in Cap'n Proto schema format (e.g. "[void, void, void]"). +func (l VoidList) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = append(buf, "void"...) + } + buf = append(buf, ']') + return string(buf) +} + +// A UInt8List is an array of UInt8 values. +type UInt8List struct{ List } + +// NewUInt8List creates a new list of UInt8, preferring placement in s. +func NewUInt8List(s *Segment, n int32) (UInt8List, error) { + l, err := newPrimitiveList(s, 1, n) + if err != nil { + return UInt8List{}, err + } + return UInt8List{l}, nil +} + +// NewText creates a new list of UInt8 from a string. +func NewText(s *Segment, v string) (UInt8List, error) { + // TODO(light): error if v is too long + l, err := NewUInt8List(s, int32(len(v)+1)) + if err != nil { + return UInt8List{}, err + } + copy(l.seg.slice(l.off, Size(len(v))), v) + return l, nil +} + +// NewTextFromBytes creates a NUL-terminated list of UInt8 from a byte slice. 
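A sketch of the text and data constructors in this file: NewText allocates a NUL-terminated UInt8List, while NewData allocates the same element type without a terminator. Illustrative only; errors are ignored for brevity.

package main

import (
	"fmt"

	capnp "zombiezen.com/go/capnproto2"
)

func main() {
	_, seg, _ := capnp.NewMessage(capnp.SingleSegment(nil))
	txt, _ := capnp.NewText(seg, "hello")
	raw, _ := capnp.NewData(seg, []byte{1, 2})
	fmt.Println(txt.Len(), raw.Len()) // 6 2 (the text length includes the NUL byte)
	fmt.Println(raw.At(0))            // 1
}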
+func NewTextFromBytes(s *Segment, v []byte) (UInt8List, error) { + // TODO(light): error if v is too long + l, err := NewUInt8List(s, int32(len(v)+1)) + if err != nil { + return UInt8List{}, err + } + copy(l.seg.slice(l.off, Size(len(v))), v) + return l, nil +} + +// NewData creates a new list of UInt8 from a byte slice. +func NewData(s *Segment, v []byte) (UInt8List, error) { + // TODO(light): error if v is too long + l, err := NewUInt8List(s, int32(len(v))) + if err != nil { + return UInt8List{}, err + } + copy(l.seg.slice(l.off, Size(len(v))), v) + return l, nil +} + +// ToText attempts to convert p into Text. +// +// Deprecated: Use Ptr.Text. +func ToText(p Pointer) string { + return toPtr(p).TextDefault("") +} + +// ToTextDefault attempts to convert p into Text, returning def on failure. +// +// Deprecated: Use Ptr.TextDefault. +func ToTextDefault(p Pointer, def string) string { + return toPtr(p).TextDefault(def) +} + +// ToData attempts to convert p into Data. +// +// Deprecated: Use Ptr.Data. +func ToData(p Pointer) []byte { + return toPtr(p).DataDefault(nil) +} + +// ToDataDefault attempts to convert p into Data, returning def on failure. +// +// Deprecated: Use Ptr.DataDefault. +func ToDataDefault(p Pointer, def []byte) []byte { + return toPtr(p).DataDefault(def) +} + +func isOneByteList(p Ptr) bool { + return p.seg != nil && p.flags.ptrType() == listPtrType && p.size.isOneByte() && p.flags.listFlags()&isCompositeList == 0 +} + +// At returns the i'th element. +func (l UInt8List) At(i int) uint8 { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 1}) + if err != nil { + return 0 + } + return l.seg.readUint8(addr) +} + +// Set sets the i'th element to v. +func (l UInt8List) Set(i int, v uint8) { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 1}) + if err != nil { + panic(err) + } + l.seg.writeUint8(addr, v) +} + +// String returns the list in Cap'n Proto schema format (e.g. "[1, 2, 3]"). +func (l UInt8List) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = strconv.AppendUint(buf, uint64(l.At(i)), 10) + } + buf = append(buf, ']') + return string(buf) +} + +// Int8List is an array of Int8 values. +type Int8List struct{ List } + +// NewInt8List creates a new list of Int8, preferring placement in s. +func NewInt8List(s *Segment, n int32) (Int8List, error) { + l, err := newPrimitiveList(s, 1, n) + if err != nil { + return Int8List{}, err + } + return Int8List{l}, nil +} + +// At returns the i'th element. +func (l Int8List) At(i int) int8 { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 1}) + if err != nil { + return 0 + } + return int8(l.seg.readUint8(addr)) +} + +// Set sets the i'th element to v. +func (l Int8List) Set(i int, v int8) { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 1}) + if err != nil { + panic(err) + } + l.seg.writeUint8(addr, uint8(v)) +} + +// String returns the list in Cap'n Proto schema format (e.g. "[1, 2, 3]"). +func (l Int8List) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = strconv.AppendInt(buf, int64(l.At(i)), 10) + } + buf = append(buf, ']') + return string(buf) +} + +// A UInt16List is an array of UInt16 values. +type UInt16List struct{ List } + +// NewUInt16List creates a new list of UInt16, preferring placement in s. 
+func NewUInt16List(s *Segment, n int32) (UInt16List, error) { + l, err := newPrimitiveList(s, 2, n) + if err != nil { + return UInt16List{}, err + } + return UInt16List{l}, nil +} + +// At returns the i'th element. +func (l UInt16List) At(i int) uint16 { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 2}) + if err != nil { + return 0 + } + return l.seg.readUint16(addr) +} + +// Set sets the i'th element to v. +func (l UInt16List) Set(i int, v uint16) { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 2}) + if err != nil { + panic(err) + } + l.seg.writeUint16(addr, v) +} + +// String returns the list in Cap'n Proto schema format (e.g. "[1, 2, 3]"). +func (l UInt16List) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = strconv.AppendUint(buf, uint64(l.At(i)), 10) + } + buf = append(buf, ']') + return string(buf) +} + +// Int16List is an array of Int16 values. +type Int16List struct{ List } + +// NewInt16List creates a new list of Int16, preferring placement in s. +func NewInt16List(s *Segment, n int32) (Int16List, error) { + l, err := newPrimitiveList(s, 2, n) + if err != nil { + return Int16List{}, err + } + return Int16List{l}, nil +} + +// At returns the i'th element. +func (l Int16List) At(i int) int16 { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 2}) + if err != nil { + return 0 + } + return int16(l.seg.readUint16(addr)) +} + +// Set sets the i'th element to v. +func (l Int16List) Set(i int, v int16) { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 2}) + if err != nil { + panic(err) + } + l.seg.writeUint16(addr, uint16(v)) +} + +// String returns the list in Cap'n Proto schema format (e.g. "[1, 2, 3]"). +func (l Int16List) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = strconv.AppendInt(buf, int64(l.At(i)), 10) + } + buf = append(buf, ']') + return string(buf) +} + +// UInt32List is an array of UInt32 values. +type UInt32List struct{ List } + +// NewUInt32List creates a new list of UInt32, preferring placement in s. +func NewUInt32List(s *Segment, n int32) (UInt32List, error) { + l, err := newPrimitiveList(s, 4, n) + if err != nil { + return UInt32List{}, err + } + return UInt32List{l}, nil +} + +// At returns the i'th element. +func (l UInt32List) At(i int) uint32 { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 4}) + if err != nil { + return 0 + } + return l.seg.readUint32(addr) +} + +// Set sets the i'th element to v. +func (l UInt32List) Set(i int, v uint32) { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 4}) + if err != nil { + panic(err) + } + l.seg.writeUint32(addr, v) +} + +// String returns the list in Cap'n Proto schema format (e.g. "[1, 2, 3]"). +func (l UInt32List) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = strconv.AppendUint(buf, uint64(l.At(i)), 10) + } + buf = append(buf, ']') + return string(buf) +} + +// Int32List is an array of Int32 values. +type Int32List struct{ List } + +// NewInt32List creates a new list of Int32, preferring placement in s. +func NewInt32List(s *Segment, n int32) (Int32List, error) { + l, err := newPrimitiveList(s, 4, n) + if err != nil { + return Int32List{}, err + } + return Int32List{l}, nil +} + +// At returns the i'th element. 
+func (l Int32List) At(i int) int32 { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 4}) + if err != nil { + return 0 + } + return int32(l.seg.readUint32(addr)) +} + +// Set sets the i'th element to v. +func (l Int32List) Set(i int, v int32) { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 4}) + if err != nil { + panic(err) + } + l.seg.writeUint32(addr, uint32(v)) +} + +// String returns the list in Cap'n Proto schema format (e.g. "[1, 2, 3]"). +func (l Int32List) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = strconv.AppendInt(buf, int64(l.At(i)), 10) + } + buf = append(buf, ']') + return string(buf) +} + +// UInt64List is an array of UInt64 values. +type UInt64List struct{ List } + +// NewUInt64List creates a new list of UInt64, preferring placement in s. +func NewUInt64List(s *Segment, n int32) (UInt64List, error) { + l, err := newPrimitiveList(s, 8, n) + if err != nil { + return UInt64List{}, err + } + return UInt64List{l}, nil +} + +// At returns the i'th element. +func (l UInt64List) At(i int) uint64 { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 8}) + if err != nil { + return 0 + } + return l.seg.readUint64(addr) +} + +// Set sets the i'th element to v. +func (l UInt64List) Set(i int, v uint64) { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 8}) + if err != nil { + panic(err) + } + l.seg.writeUint64(addr, v) +} + +// String returns the list in Cap'n Proto schema format (e.g. "[1, 2, 3]"). +func (l UInt64List) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = strconv.AppendUint(buf, l.At(i), 10) + } + buf = append(buf, ']') + return string(buf) +} + +// Int64List is an array of Int64 values. +type Int64List struct{ List } + +// NewInt64List creates a new list of Int64, preferring placement in s. +func NewInt64List(s *Segment, n int32) (Int64List, error) { + l, err := newPrimitiveList(s, 8, n) + if err != nil { + return Int64List{}, err + } + return Int64List{l}, nil +} + +// At returns the i'th element. +func (l Int64List) At(i int) int64 { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 8}) + if err != nil { + return 0 + } + return int64(l.seg.readUint64(addr)) +} + +// Set sets the i'th element to v. +func (l Int64List) Set(i int, v int64) { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 8}) + if err != nil { + panic(err) + } + l.seg.writeUint64(addr, uint64(v)) +} + +// String returns the list in Cap'n Proto schema format (e.g. "[1, 2, 3]"). +func (l Int64List) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = strconv.AppendInt(buf, l.At(i), 10) + } + buf = append(buf, ']') + return string(buf) +} + +// Float32List is an array of Float32 values. +type Float32List struct{ List } + +// NewFloat32List creates a new list of Float32, preferring placement in s. +func NewFloat32List(s *Segment, n int32) (Float32List, error) { + l, err := newPrimitiveList(s, 4, n) + if err != nil { + return Float32List{}, err + } + return Float32List{l}, nil +} + +// At returns the i'th element. +func (l Float32List) At(i int) float32 { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 4}) + if err != nil { + return 0 + } + return math.Float32frombits(l.seg.readUint32(addr)) +} + +// Set sets the i'th element to v. 
+func (l Float32List) Set(i int, v float32) { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 4}) + if err != nil { + panic(err) + } + l.seg.writeUint32(addr, math.Float32bits(v)) +} + +// String returns the list in Cap'n Proto schema format (e.g. "[1, 2, 3]"). +func (l Float32List) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = strconv.AppendFloat(buf, float64(l.At(i)), 'g', -1, 32) + } + buf = append(buf, ']') + return string(buf) +} + +// Float64List is an array of Float64 values. +type Float64List struct{ List } + +// NewFloat64List creates a new list of Float64, preferring placement in s. +func NewFloat64List(s *Segment, n int32) (Float64List, error) { + l, err := newPrimitiveList(s, 8, n) + if err != nil { + return Float64List{}, err + } + return Float64List{l}, nil +} + +// At returns the i'th element. +func (l Float64List) At(i int) float64 { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 8}) + if err != nil { + return 0 + } + return math.Float64frombits(l.seg.readUint64(addr)) +} + +// Set sets the i'th element to v. +func (l Float64List) Set(i int, v float64) { + addr, err := l.primitiveElem(i, ObjectSize{DataSize: 8}) + if err != nil { + panic(err) + } + l.seg.writeUint64(addr, math.Float64bits(v)) +} + +// String returns the list in Cap'n Proto schema format (e.g. "[1, 2, 3]"). +func (l Float64List) String() string { + var buf []byte + buf = append(buf, '[') + for i := 0; i < l.Len(); i++ { + if i > 0 { + buf = append(buf, ", "...) + } + buf = strconv.AppendFloat(buf, l.At(i), 'g', -1, 64) + } + buf = append(buf, ']') + return string(buf) +} + +type listFlags uint8 + +const ( + isCompositeList listFlags = 1 << iota + isBitList +) + +var errBitListStruct = errors.New("capnp: SetStruct called on bit list") diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/mem.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/mem.go new file mode 100644 index 00000000..e3ee869a --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/mem.go @@ -0,0 +1,913 @@ +package capnp + +import ( + "bufio" + "encoding/binary" + "errors" + "fmt" + "io" + "sync" + + "zombiezen.com/go/capnproto2/internal/packed" +) + +// Security limits. Matches C++ implementation. +const ( + defaultTraverseLimit = 64 << 20 // 64 MiB + defaultDepthLimit = 64 + + maxStreamSegments = 512 + + defaultDecodeLimit = 64 << 20 // 64 MiB +) + +const maxDepth = ^uint(0) + +// A Message is a tree of Cap'n Proto objects, split into one or more +// segments of contiguous memory. The only required field is Arena. +// A Message is safe to read from multiple goroutines. +type Message struct { + // rlimit must be first so that it is 64-bit aligned. + // See sync/atomic docs. + rlimit ReadLimiter + rlimitInit sync.Once + + Arena Arena + + // CapTable is the indexed list of the clients referenced in the + // message. Capability pointers inside the message will use this table + // to map pointers to Clients. The table is usually populated by the + // RPC system. + // + // See https://capnproto.org/encoding.html#capabilities-interfaces for + // more details on the capability table. + CapTable []Client + + // TraverseLimit limits how many total bytes of data are allowed to be + // traversed while reading. Traversal is counted when a Struct or + // List is obtained. 
This means that calling a getter for the same + // sub-struct multiple times will cause it to be double-counted. Once + // the traversal limit is reached, pointer accessors will report + // errors. See https://capnproto.org/encoding.html#amplification-attack + // for more details on this security measure. + // + // If not set, this defaults to 64 MiB. + TraverseLimit uint64 + + // DepthLimit limits how deeply-nested a message structure can be. + // If not set, this defaults to 64. + DepthLimit uint + + // mu protects the following fields: + mu sync.Mutex + segs map[SegmentID]*Segment + firstSeg Segment // Preallocated first segment. msg is non-nil once initialized. +} + +// NewMessage creates a message with a new root and returns the first +// segment. It is an error to call NewMessage on an arena with data in it. +func NewMessage(arena Arena) (msg *Message, first *Segment, err error) { + msg = &Message{Arena: arena} + switch arena.NumSegments() { + case 0: + first, err = msg.allocSegment(wordSize) + if err != nil { + return nil, nil, err + } + case 1: + first, err = msg.Segment(0) + if err != nil { + return nil, nil, err + } + if len(first.data) > 0 { + return nil, nil, errHasData + } + default: + return nil, nil, errHasData + } + if first.ID() != 0 { + return nil, nil, errors.New("capnp: arena allocated first segment with non-zero ID") + } + seg, _, err := alloc(first, wordSize) // allocate root + if err != nil { + return nil, nil, err + } + if seg != first { + return nil, nil, errors.New("capnp: arena didn't allocate first word in first segment") + } + return msg, first, nil +} + +// Reset resets a message to use a different arena, allowing a single +// Message to be reused for reading multiple messages. This invalidates +// any existing pointers in the Message, so use with caution. +func (m *Message) Reset(arena Arena) { + m.mu.Lock() + m.Arena = arena + m.CapTable = nil + m.segs = nil + m.firstSeg = Segment{} + m.mu.Unlock() + if m.TraverseLimit == 0 { + m.ReadLimiter().Reset(defaultTraverseLimit) + } else { + m.ReadLimiter().Reset(m.TraverseLimit) + } +} + +// Root returns the pointer to the message's root object. +// +// Deprecated: Use RootPtr. +func (m *Message) Root() (Pointer, error) { + p, err := m.RootPtr() + return p.toPointer(), err +} + +// RootPtr returns the pointer to the message's root object. +func (m *Message) RootPtr() (Ptr, error) { + s, err := m.Segment(0) + if err != nil { + return Ptr{}, err + } + return s.root().PtrAt(0) +} + +// SetRoot sets the message's root object to p. +// +// Deprecated: Use SetRootPtr. +func (m *Message) SetRoot(p Pointer) error { + return m.SetRootPtr(toPtr(p)) +} + +// SetRootPtr sets the message's root object to p. +func (m *Message) SetRootPtr(p Ptr) error { + s, err := m.Segment(0) + if err != nil { + return err + } + return s.root().SetPtr(0, p) +} + +// AddCap appends a capability to the message's capability table and +// returns its ID. +func (m *Message) AddCap(c Client) CapabilityID { + n := CapabilityID(len(m.CapTable)) + m.CapTable = append(m.CapTable, c) + return n +} + +// ReadLimiter returns the message's read limiter. Useful if you want +// to reset the traversal limit while reading. 
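An illustrative end-to-end sketch (not part of the vendored sources) of the Message API above: allocate a message, install a list as its root, and read it back, with reads metered by the read limiter described above. Errors are ignored for brevity.

package main

import (
	"fmt"

	capnp "zombiezen.com/go/capnproto2"
)

func main() {
	msg, seg, _ := capnp.NewMessage(capnp.SingleSegment(nil))

	l, _ := capnp.NewUInt16List(seg, 3)
	l.Set(0, 7)
	_ = msg.SetRootPtr(l.ToPtr())

	// Traversal is charged against TraverseLimit (64 MiB by default); the
	// limiter can be topped up explicitly if a reader needs more headroom.
	msg.ReadLimiter().Reset(128 << 20)

	root, _ := msg.RootPtr()
	fmt.Println(root.List().Len()) // 3
}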
+func (m *Message) ReadLimiter() *ReadLimiter { + m.rlimitInit.Do(func() { + if m.TraverseLimit == 0 { + m.rlimit.limit = defaultTraverseLimit + } else { + m.rlimit.limit = m.TraverseLimit + } + }) + return &m.rlimit +} + +func (m *Message) depthLimit() uint { + if m.DepthLimit != 0 { + return m.DepthLimit + } + return defaultDepthLimit +} + +// NumSegments returns the number of segments in the message. +func (m *Message) NumSegments() int64 { + return int64(m.Arena.NumSegments()) +} + +// Segment returns the segment with the given ID. +func (m *Message) Segment(id SegmentID) (*Segment, error) { + if isInt32Bit && id > maxInt32 { + return nil, errSegment32Bit + } + if int64(id) >= m.Arena.NumSegments() { + return nil, errSegmentOutOfBounds + } + m.mu.Lock() + if seg := m.segment(id); seg != nil { + m.mu.Unlock() + return seg, nil + } + data, err := m.Arena.Data(id) + if err != nil { + m.mu.Unlock() + return nil, err + } + seg := m.setSegment(id, data) + m.mu.Unlock() + return seg, nil +} + +// segment returns the segment with the given ID. +// The caller must be holding m.mu. +func (m *Message) segment(id SegmentID) *Segment { + if m.segs == nil { + if id == 0 && m.firstSeg.msg != nil { + return &m.firstSeg + } + return nil + } + return m.segs[id] +} + +// setSegment creates or updates the Segment with the given ID. +// The caller must be holding m.mu. +func (m *Message) setSegment(id SegmentID, data []byte) *Segment { + if m.segs == nil { + if id == 0 { + m.firstSeg = Segment{ + id: id, + msg: m, + data: data, + } + return &m.firstSeg + } + m.segs = make(map[SegmentID]*Segment) + if m.firstSeg.msg != nil { + m.segs[0] = &m.firstSeg + } + } else if seg := m.segs[id]; seg != nil { + seg.data = data + return seg + } + seg := &Segment{ + id: id, + msg: m, + data: data, + } + m.segs[id] = seg + return seg +} + +// allocSegment creates or resizes an existing segment such that +// cap(seg.Data) - len(seg.Data) >= sz. +func (m *Message) allocSegment(sz Size) (*Segment, error) { + m.mu.Lock() + if m.segs == nil && m.firstSeg.msg != nil { + m.segs = make(map[SegmentID]*Segment) + m.segs[0] = &m.firstSeg + } + id, data, err := m.Arena.Allocate(sz, m.segs) + if err != nil { + m.mu.Unlock() + return nil, err + } + if isInt32Bit && id > maxInt32 { + m.mu.Unlock() + return nil, errSegment32Bit + } + seg := m.setSegment(id, data) + m.mu.Unlock() + return seg, nil +} + +// alloc allocates sz zero-filled bytes. It prefers using s, but may +// use a different segment in the same message if there's not sufficient +// capacity. +func alloc(s *Segment, sz Size) (*Segment, Address, error) { + sz = sz.padToWord() + if sz > maxSize-wordSize { + return nil, 0, errOverflow + } + + if !hasCapacity(s.data, sz) { + var err error + s, err = s.msg.allocSegment(sz) + if err != nil { + return nil, 0, err + } + } + + addr := Address(len(s.data)) + end, ok := addr.addSize(sz) + if !ok { + return nil, 0, errOverflow + } + space := s.data[len(s.data):end] + s.data = s.data[:end] + for i := range space { + space[i] = 0 + } + return s, addr, nil +} + +// An Arena loads and allocates segments for a Message. +type Arena interface { + // NumSegments returns the number of segments in the arena. + // This must not be larger than 1<<32. + NumSegments() int64 + + // Data loads the data for the segment with the given ID. IDs are in + // the range [0, NumSegments()). + // must be tightly packed in the range [0, NumSegments()). 
+ Data(id SegmentID) ([]byte, error) + + // Allocate selects a segment to place a new object in, creating a + // segment or growing the capacity of a previously loaded segment if + // necessary. If Allocate does not return an error, then the + // difference of the capacity and the length of the returned slice + // must be at least minsz. segs is a map of segment slices returned + // by the Data method keyed by ID (although the length of these slices + // may have changed by previous allocations). Allocate must not + // modify segs. + // + // If Allocate creates a new segment, the ID must be one larger than + // the last segment's ID or zero if it is the first segment. + // + // If Allocate returns an previously loaded segment's ID, then the + // arena is responsible for preserving the existing data in the + // returned byte slice. + Allocate(minsz Size, segs map[SegmentID]*Segment) (SegmentID, []byte, error) +} + +type singleSegmentArena []byte + +// SingleSegment returns a new arena with an expanding single-segment +// buffer. b can be used to populate the segment for reading or to +// reserve memory of a specific size. A SingleSegment arena does not +// return errors unless you attempt to access another segment. +func SingleSegment(b []byte) Arena { + ssa := new(singleSegmentArena) + *ssa = b + return ssa +} + +func (ssa *singleSegmentArena) NumSegments() int64 { + return 1 +} + +func (ssa *singleSegmentArena) Data(id SegmentID) ([]byte, error) { + if id != 0 { + return nil, errSegmentOutOfBounds + } + return *ssa, nil +} + +func (ssa *singleSegmentArena) Allocate(sz Size, segs map[SegmentID]*Segment) (SegmentID, []byte, error) { + data := []byte(*ssa) + if segs[0] != nil { + data = segs[0].data + } + if len(data)%int(wordSize) != 0 { + return 0, nil, errors.New("capnp: segment size is not a multiple of word size") + } + if hasCapacity(data, sz) { + return 0, data, nil + } + inc, err := nextAlloc(int64(len(data)), int64(maxSegmentSize()), sz) + if err != nil { + return 0, nil, fmt.Errorf("capnp: alloc %d bytes: %v", sz, err) + } + buf := make([]byte, len(data), cap(data)+inc) + copy(buf, data) + *ssa = buf + return 0, *ssa, nil +} + +type roSingleSegment []byte + +func (ss roSingleSegment) NumSegments() int64 { + return 1 +} + +func (ss roSingleSegment) Data(id SegmentID) ([]byte, error) { + if id != 0 { + return nil, errSegmentOutOfBounds + } + return ss, nil +} + +func (ss roSingleSegment) Allocate(sz Size, segs map[SegmentID]*Segment) (SegmentID, []byte, error) { + return 0, nil, errors.New("capnp: segment is read-only") +} + +type multiSegmentArena [][]byte + +// MultiSegment returns a new arena that allocates new segments when +// they are full. b can be used to populate the buffer for reading or +// to reserve memory of a specific size. +func MultiSegment(b [][]byte) Arena { + msa := new(multiSegmentArena) + *msa = b + return msa +} + +// demuxArena slices b into a multi-segment arena. 
+func demuxArena(hdr streamHeader, data []byte) (Arena, error) { + segs := make([][]byte, int(hdr.maxSegment())+1) + for i := range segs { + sz, err := hdr.segmentSize(uint32(i)) + if err != nil { + return nil, err + } + segs[i], data = data[:sz:sz], data[sz:] + } + return MultiSegment(segs), nil +} + +func (msa *multiSegmentArena) NumSegments() int64 { + return int64(len(*msa)) +} + +func (msa *multiSegmentArena) Data(id SegmentID) ([]byte, error) { + if int64(id) >= int64(len(*msa)) { + return nil, errSegmentOutOfBounds + } + return (*msa)[id], nil +} + +func (msa *multiSegmentArena) Allocate(sz Size, segs map[SegmentID]*Segment) (SegmentID, []byte, error) { + var total int64 + for i, data := range *msa { + id := SegmentID(i) + if s := segs[id]; s != nil { + data = s.data + } + if hasCapacity(data, sz) { + return id, data, nil + } + total += int64(cap(data)) + if total < 0 { + // Overflow. + return 0, nil, fmt.Errorf("capnp: alloc %d bytes: message too large", sz) + } + } + n, err := nextAlloc(total, 1<<63-1, sz) + if err != nil { + return 0, nil, fmt.Errorf("capnp: alloc %d bytes: %v", sz, err) + } + buf := make([]byte, 0, n) + id := SegmentID(len(*msa)) + *msa = append(*msa, buf) + return id, buf, nil +} + +// nextAlloc computes how much more space to allocate given the number +// of bytes allocated in the entire message and the requested number of +// bytes. It will always return a multiple of wordSize. max must be a +// multiple of wordSize. The sum of curr and the returned size will +// always be less than max. +func nextAlloc(curr, max int64, req Size) (int, error) { + if req == 0 { + return 0, nil + } + maxinc := int64(1<<32 - 8) // largest word-aligned Size + if isInt32Bit { + maxinc = 1<<31 - 8 // largest word-aligned int + } + if int64(req) > maxinc { + return 0, errors.New("allocation too large") + } + req = req.padToWord() + want := curr + int64(req) + if want <= curr || want > max { + return 0, errors.New("allocation overflows message size") + } + new := curr + double := new + new + switch { + case want < 1024: + next := (1024 - curr + 7) &^ 7 + if next < curr { + return int((curr + 7) &^ 7), nil + } + return int(next), nil + case want > double: + return int(req), nil + default: + for 0 < new && new < want { + new += new / 4 + } + if new <= 0 { + return int(req), nil + } + delta := new - curr + if delta > maxinc { + return int(maxinc), nil + } + return int((delta + 7) &^ 7), nil + } +} + +// A Decoder represents a framer that deserializes a particular Cap'n +// Proto input stream. +type Decoder struct { + r io.Reader + + segbuf [msgHeaderSize]byte + hdrbuf []byte + + reuse bool + buf []byte + msg Message + arena roSingleSegment + + // Maximum number of bytes that can be read per call to Decode. + // If not set, a reasonable default is used. + MaxMessageSize uint64 +} + +// NewDecoder creates a new Cap'n Proto framer that reads from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +// NewPackedDecoder creates a new Cap'n Proto framer that reads from a +// packed stream r. +func NewPackedDecoder(r io.Reader) *Decoder { + return NewDecoder(packed.NewReader(bufio.NewReader(r))) +} + +// Decode reads a message from the decoder stream. 
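A hedged sketch of the stream framing API: the Decoder above reads framed messages from an io.Reader, one per Decode call, and its Encoder counterpart (defined later in this file) writes them. Errors other than end-of-stream are ignored for brevity.

package main

import (
	"bytes"
	"fmt"

	capnp "zombiezen.com/go/capnproto2"
)

func main() {
	var buf bytes.Buffer

	enc := capnp.NewEncoder(&buf)
	for i := 0; i < 2; i++ {
		msg, seg, _ := capnp.NewMessage(capnp.SingleSegment(nil))
		l, _ := capnp.NewUInt8List(seg, 1)
		l.Set(0, uint8(i))
		_ = msg.SetRootPtr(l.ToPtr())
		_ = enc.Encode(msg)
	}

	dec := capnp.NewDecoder(&buf)
	for {
		msg, err := dec.Decode()
		if err != nil {
			break // io.EOF once the stream is drained
		}
		root, _ := msg.RootPtr()
		fmt.Println(root.List().Len()) // 1, printed once per decoded message
	}
}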
+func (d *Decoder) Decode() (*Message, error) { + maxSize := d.MaxMessageSize + if maxSize == 0 { + maxSize = defaultDecodeLimit + } + if _, err := io.ReadFull(d.r, d.segbuf[:]); err != nil { + return nil, err + } + maxSeg := binary.LittleEndian.Uint32(d.segbuf[:]) + if maxSeg > maxStreamSegments { + return nil, errTooManySegments + } + hdrSize := streamHeaderSize(maxSeg) + if hdrSize > maxSize || hdrSize > (1<<31-1) { + return nil, errDecodeLimit + } + d.hdrbuf = resizeSlice(d.hdrbuf, int(hdrSize)) + copy(d.hdrbuf, d.segbuf[:]) + if _, err := io.ReadFull(d.r, d.hdrbuf[msgHeaderSize:]); err != nil { + return nil, err + } + hdr, _, err := parseStreamHeader(d.hdrbuf) + if err != nil { + return nil, err + } + total, err := hdr.totalSize() + if err != nil { + return nil, err + } + // TODO(someday): if total size is greater than can fit in one buffer, + // attempt to allocate buffer per segment. + if total > maxSize-hdrSize || total > (1<<31-1) { + return nil, errDecodeLimit + } + if !d.reuse { + buf := make([]byte, int(total)) + if _, err := io.ReadFull(d.r, buf); err != nil { + return nil, err + } + arena, err := demuxArena(hdr, buf) + if err != nil { + return nil, err + } + return &Message{Arena: arena}, nil + } + d.buf = resizeSlice(d.buf, int(total)) + if _, err := io.ReadFull(d.r, d.buf); err != nil { + return nil, err + } + var arena Arena + if hdr.maxSegment() == 0 { + d.arena = d.buf[:len(d.buf):len(d.buf)] + arena = &d.arena + } else { + var err error + arena, err = demuxArena(hdr, d.buf) + if err != nil { + return nil, err + } + } + d.msg.Reset(arena) + return &d.msg, nil +} + +func resizeSlice(b []byte, size int) []byte { + if cap(b) < size { + return make([]byte, size) + } + return b[:size] +} + +// ReuseBuffer causes the decoder to reuse its buffer on subsequent decodes. +// The decoder may return messages that cannot handle allocations. +func (d *Decoder) ReuseBuffer() { + d.reuse = true +} + +// Unmarshal reads an unpacked serialized stream into a message. No +// copying is performed, so the objects in the returned message read +// directly from data. +func Unmarshal(data []byte) (*Message, error) { + if len(data) == 0 { + return nil, io.EOF + } + hdr, data, err := parseStreamHeader(data) + if err != nil { + return nil, err + } + if tot, err := hdr.totalSize(); err != nil { + return nil, err + } else if tot > uint64(len(data)) { + return nil, io.ErrUnexpectedEOF + } + arena, err := demuxArena(hdr, data) + if err != nil { + return nil, err + } + return &Message{Arena: arena}, nil +} + +// UnmarshalPacked reads a packed serialized stream into a message. +func UnmarshalPacked(data []byte) (*Message, error) { + if len(data) == 0 { + return nil, io.EOF + } + data, err := packed.Unpack(nil, data) + if err != nil { + return nil, err + } + return Unmarshal(data) +} + +// MustUnmarshalRoot reads an unpacked serialized stream and returns +// its root pointer. If there is any error, it panics. +// +// Deprecated: Use MustUnmarshalRootPtr. +func MustUnmarshalRoot(data []byte) Pointer { + msg, err := Unmarshal(data) + if err != nil { + panic(err) + } + p, err := msg.Root() + if err != nil { + panic(err) + } + return p +} + +// MustUnmarshalRootPtr reads an unpacked serialized stream and returns +// its root pointer. If there is any error, it panics. 
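A sketch of the one-shot, buffer-oriented entry points: Message.Marshal produces a framed byte slice, Unmarshal parses one without copying, and MustUnmarshalRootPtr (below) combines unmarshalling with fetching the root, panicking on malformed input. Illustrative only; errors are ignored.

package main

import (
	"fmt"

	capnp "zombiezen.com/go/capnproto2"
)

func main() {
	msg, seg, _ := capnp.NewMessage(capnp.SingleSegment(nil))
	l, _ := capnp.NewUInt64List(seg, 1)
	l.Set(0, 12345)
	_ = msg.SetRootPtr(l.ToPtr())

	wire, _ := msg.Marshal()

	root := capnp.MustUnmarshalRootPtr(wire) // panics if wire is malformed
	got := capnp.UInt64List{List: root.List()}
	fmt.Println(got.At(0)) // 12345
}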
+func MustUnmarshalRootPtr(data []byte) Ptr { + msg, err := Unmarshal(data) + if err != nil { + panic(err) + } + p, err := msg.RootPtr() + if err != nil { + panic(err) + } + return p +} + +// An Encoder represents a framer for serializing a particular Cap'n +// Proto stream. +type Encoder struct { + w io.Writer + hdrbuf []byte + bufs [][]byte + + packed bool + packbuf []byte +} + +// NewEncoder creates a new Cap'n Proto framer that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w} +} + +// NewPackedEncoder creates a new Cap'n Proto framer that writes to a +// packed stream w. +func NewPackedEncoder(w io.Writer) *Encoder { + return &Encoder{w: w, packed: true} +} + +// Encode writes a message to the encoder stream. +func (e *Encoder) Encode(m *Message) error { + nsegs := m.NumSegments() + if nsegs == 0 { + return errMessageEmpty + } + e.bufs = append(e.bufs[:0], nil) // first element is placeholder for header + maxSeg := uint32(nsegs - 1) + hdrSize := streamHeaderSize(maxSeg) + if uint64(cap(e.hdrbuf)) < hdrSize { + e.hdrbuf = make([]byte, 0, hdrSize) + } + e.hdrbuf = appendUint32(e.hdrbuf[:0], maxSeg) + for i := int64(0); i < nsegs; i++ { + s, err := m.Segment(SegmentID(i)) + if err != nil { + return err + } + n := len(s.data) + if int64(n) > int64(maxSize) { + return errSegmentTooLarge + } + e.hdrbuf = appendUint32(e.hdrbuf, uint32(Size(n)/wordSize)) + e.bufs = append(e.bufs, s.data) + } + if len(e.hdrbuf)%int(wordSize) != 0 { + e.hdrbuf = appendUint32(e.hdrbuf, 0) + } + e.bufs[0] = e.hdrbuf + if e.packed { + return e.writePacked(e.bufs) + } + return e.write(e.bufs) +} + +func (e *Encoder) writePacked(bufs [][]byte) error { + for _, b := range bufs { + e.packbuf = packed.Pack(e.packbuf[:0], b) + if _, err := e.w.Write(e.packbuf); err != nil { + return err + } + } + return nil +} + +func (m *Message) segmentSizes() ([]Size, error) { + nsegs := m.NumSegments() + sizes := make([]Size, nsegs) + for i := int64(0); i < nsegs; i++ { + s, err := m.Segment(SegmentID(i)) + if err != nil { + return sizes[:i], err + } + n := len(s.data) + if int64(n) > int64(maxSize) { + return sizes[:i], errSegmentTooLarge + } + sizes[i] = Size(n) + } + return sizes, nil +} + +// Marshal concatenates the segments in the message into a single byte +// slice including framing. +func (m *Message) Marshal() ([]byte, error) { + // Compute buffer size. + // TODO(light): error out if too many segments + nsegs := m.NumSegments() + if nsegs == 0 { + return nil, errMessageEmpty + } + maxSeg := uint32(nsegs - 1) + hdrSize := streamHeaderSize(maxSeg) + sizes, err := m.segmentSizes() + if err != nil { + return nil, err + } + // TODO(light): error out if too large + total := uint64(hdrSize) + totalSize(sizes) + + // Fill in buffer. + buf := make([]byte, hdrSize, total) + // TODO: remove marshalStreamHeader and inline. + marshalStreamHeader(buf, sizes) + for i := int64(0); i < nsegs; i++ { + s, err := m.Segment(SegmentID(i)) + if err != nil { + return nil, err + } + buf = append(buf, s.data...) + } + return buf, nil +} + +// MarshalPacked marshals the message in packed form. +func (m *Message) MarshalPacked() ([]byte, error) { + data, err := m.Marshal() + if err != nil { + return nil, err + } + buf := make([]byte, 0, len(data)) + buf = packed.Pack(buf, data) + return buf, nil +} + +// Stream header sizes. +const ( + msgHeaderSize = 4 + segHeaderSize = 4 +) + +// streamHeaderSize returns the size of the header, given the +// first 32-bit number. 
+func streamHeaderSize(n uint32) uint64 { + return (msgHeaderSize + segHeaderSize*(uint64(n)+1) + 7) &^ 7 +} + +// marshalStreamHeader marshals the sizes into the byte slice, which +// must be of size streamHeaderSize(len(sizes) - 1). +// +// TODO: remove marshalStreamHeader and inline. +func marshalStreamHeader(b []byte, sizes []Size) { + binary.LittleEndian.PutUint32(b, uint32(len(sizes)-1)) + for i, sz := range sizes { + loc := msgHeaderSize + i*segHeaderSize + binary.LittleEndian.PutUint32(b[loc:], uint32(sz/Size(wordSize))) + } +} + +// appendUint32 appends a uint32 to a byte slice and returns the +// new slice. +func appendUint32(b []byte, v uint32) []byte { + b = append(b, 0, 0, 0, 0) + binary.LittleEndian.PutUint32(b[len(b)-4:], v) + return b +} + +type streamHeader struct { + b []byte +} + +// parseStreamHeader parses the header of the stream framing format. +func parseStreamHeader(data []byte) (h streamHeader, tail []byte, err error) { + if uint64(len(data)) < streamHeaderSize(0) { + return streamHeader{}, nil, io.ErrUnexpectedEOF + } + maxSeg := binary.LittleEndian.Uint32(data) + // TODO(light): check int + hdrSize := streamHeaderSize(maxSeg) + if uint64(len(data)) < hdrSize { + return streamHeader{}, nil, io.ErrUnexpectedEOF + } + return streamHeader{b: data}, data[hdrSize:], nil +} + +func (h streamHeader) maxSegment() uint32 { + return binary.LittleEndian.Uint32(h.b) +} + +func (h streamHeader) segmentSize(i uint32) (Size, error) { + s := binary.LittleEndian.Uint32(h.b[msgHeaderSize+i*segHeaderSize:]) + sz, ok := wordSize.times(int32(s)) + if !ok { + return 0, errSegmentTooLarge + } + return sz, nil +} + +func (h streamHeader) totalSize() (uint64, error) { + var sum uint64 + for i := uint64(0); i <= uint64(h.maxSegment()); i++ { + x, err := h.segmentSize(uint32(i)) + if err != nil { + return sum, err + } + sum += uint64(x) + } + return sum, nil +} + +func hasCapacity(b []byte, sz Size) bool { + return sz <= Size(cap(b)-len(b)) +} + +func totalSize(s []Size) uint64 { + var sum uint64 + for _, sz := range s { + sum += uint64(sz) + } + return sum +} + +const ( + maxInt32 = 0x7fffffff + maxInt = int(^uint(0) >> 1) + + isInt32Bit = maxInt == maxInt32 +) + +// maxSegmentSize returns the maximum permitted size of a single segment +// on this platform. +// +// This is effectively a compile-time constant, but can't be represented +// as a constant because it requires a conditional. It is trivially +// inlinable and optimizable, so should act like one. 
+func maxSegmentSize() Size { + if isInt32Bit { + return Size(maxInt32 - 7) + } else { + return maxSize - 7 + } +} + +var ( + errSegmentOutOfBounds = errors.New("capnp: segment ID out of bounds") + errSegment32Bit = errors.New("capnp: segment ID larger than 31 bits") + errMessageEmpty = errors.New("capnp: marshalling an empty message") + errHasData = errors.New("capnp: NewMessage called on arena with data") + errSegmentTooLarge = errors.New("capnp: segment too large") + errTooManySegments = errors.New("capnp: too many segments to decode") + errDecodeLimit = errors.New("capnp: message too large") +) diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/mem_18.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/mem_18.go new file mode 100644 index 00000000..d2853072 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/mem_18.go @@ -0,0 +1,10 @@ +// +build go1.8 + +package capnp + +import "net" + +func (e *Encoder) write(bufs [][]byte) error { + _, err := (*net.Buffers)(&bufs).WriteTo(e.w) + return err +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/mem_other.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/mem_other.go new file mode 100644 index 00000000..ba1ab667 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/mem_other.go @@ -0,0 +1,12 @@ +// +build !go1.8 + +package capnp + +func (e *Encoder) write(bufs [][]byte) error { + for _, b := range bufs { + if _, err := e.w.Write(b); err != nil { + return err + } + } + return nil +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/pointer.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/pointer.go new file mode 100644 index 00000000..9b69e6b3 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/pointer.go @@ -0,0 +1,304 @@ +package capnp + +// A Ptr is a reference to a Cap'n Proto struct, list, or interface. +// The zero value is a null pointer. +type Ptr struct { + seg *Segment + off Address + lenOrCap uint32 + size ObjectSize + depthLimit uint + flags ptrFlags +} + +func toPtr(p Pointer) Ptr { + if p == nil { + return Ptr{} + } + switch p := p.underlying().(type) { + case Struct: + return p.ToPtr() + case List: + return p.ToPtr() + case Interface: + return p.ToPtr() + } + return Ptr{} +} + +// Struct converts p to a Struct. If p does not hold a Struct pointer, +// the zero value is returned. +func (p Ptr) Struct() Struct { + if p.flags.ptrType() != structPtrType { + return Struct{} + } + return Struct{ + seg: p.seg, + off: p.off, + size: p.size, + flags: p.flags.structFlags(), + depthLimit: p.depthLimit, + } +} + +// StructDefault attempts to convert p into a struct, reading the +// default value from def if p is not a struct. +func (p Ptr) StructDefault(def []byte) (Struct, error) { + s := p.Struct() + if s.seg == nil { + if def == nil { + return Struct{}, nil + } + defp, err := unmarshalDefault(def) + if err != nil { + return Struct{}, err + } + return defp.Struct(), nil + } + return s, nil +} + +// List converts p to a List. If p does not hold a List pointer, +// the zero value is returned. 
+func (p Ptr) List() List { + if p.flags.ptrType() != listPtrType { + return List{} + } + return List{ + seg: p.seg, + off: p.off, + length: int32(p.lenOrCap), + size: p.size, + flags: p.flags.listFlags(), + depthLimit: p.depthLimit, + } +} + +// ListDefault attempts to convert p into a list, reading the default +// value from def if p is not a list. +func (p Ptr) ListDefault(def []byte) (List, error) { + l := p.List() + if l.seg == nil { + if def == nil { + return List{}, nil + } + defp, err := unmarshalDefault(def) + if err != nil { + return List{}, err + } + return defp.List(), nil + } + return l, nil +} + +// Interface converts p to an Interface. If p does not hold a List +// pointer, the zero value is returned. +func (p Ptr) Interface() Interface { + if p.flags.ptrType() != interfacePtrType { + return Interface{} + } + return Interface{ + seg: p.seg, + cap: CapabilityID(p.lenOrCap), + } +} + +// Text attempts to convert p into Text, returning an empty string if +// p is not a valid 1-byte list pointer. +func (p Ptr) Text() string { + b, ok := p.text() + if !ok { + return "" + } + return string(b) +} + +// TextDefault attempts to convert p into Text, returning def if p is +// not a valid 1-byte list pointer. +func (p Ptr) TextDefault(def string) string { + b, ok := p.text() + if !ok { + return def + } + return string(b) +} + +// TextBytes attempts to convert p into Text, returning nil if p is not +// a valid 1-byte list pointer. It returns a slice directly into the +// segment. +func (p Ptr) TextBytes() []byte { + b, ok := p.text() + if !ok { + return nil + } + return b +} + +// TextBytesDefault attempts to convert p into Text, returning def if p +// is not a valid 1-byte list pointer. It returns a slice directly into +// the segment. +func (p Ptr) TextBytesDefault(def string) []byte { + b, ok := p.text() + if !ok { + return []byte(def) + } + return b +} + +func (p Ptr) text() (b []byte, ok bool) { + if !isOneByteList(p) { + return nil, false + } + l := p.List() + b = l.seg.slice(l.off, Size(l.length)) + if len(b) == 0 || b[len(b)-1] != 0 { + // Text must be null-terminated. + return nil, false + } + return b[:len(b)-1 : len(b)], true +} + +// Data attempts to convert p into Data, returning nil if p is not a +// valid 1-byte list pointer. +func (p Ptr) Data() []byte { + return p.DataDefault(nil) +} + +// DataDefault attempts to convert p into Data, returning def if p is +// not a valid 1-byte list pointer. +func (p Ptr) DataDefault(def []byte) []byte { + if !isOneByteList(p) { + return def + } + l := p.List() + b := l.seg.slice(l.off, Size(l.length)) + if b == nil { + return def + } + return b +} + +func (p Ptr) toPointer() Pointer { + if p.seg == nil { + return nil + } + switch p.flags.ptrType() { + case structPtrType: + return p.Struct() + case listPtrType: + return p.List() + case interfacePtrType: + return p.Interface() + } + return nil +} + +// IsValid reports whether p is valid. +func (p Ptr) IsValid() bool { + return p.seg != nil +} + +// Segment returns the segment this pointer points into. +// If nil, then this is an invalid pointer. +func (p Ptr) Segment() *Segment { + return p.seg +} + +// Default returns p if it is valid, otherwise it unmarshals def. +func (p Ptr) Default(def []byte) (Ptr, error) { + if !p.IsValid() { + return unmarshalDefault(def) + } + return p, nil +} + +// SamePtr reports whether p and q refer to the same object. 
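An illustrative sketch of the Ptr conversions above: a typed object can be turned into a generic Ptr, read back as text or raw bytes, and compared for identity with SamePtr; a zero Ptr is null and falls back to the supplied defaults. Errors are ignored for brevity.

package main

import (
	"fmt"

	capnp "zombiezen.com/go/capnproto2"
)

func main() {
	_, seg, _ := capnp.NewMessage(capnp.SingleSegment(nil))
	txt, _ := capnp.NewText(seg, "hello")

	p := txt.ToPtr()
	fmt.Println(p.Text())            // hello
	fmt.Println(len(p.Data()))       // 6 (raw bytes include the NUL terminator)
	fmt.Println(capnp.SamePtr(p, p)) // true

	var null capnp.Ptr
	fmt.Println(null.TextDefault("none")) // none
	fmt.Println(null.IsValid())           // false
}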
+func SamePtr(p, q Ptr) bool { + return p.seg == q.seg && p.off == q.off +} + +// A value that implements Pointer is a reference to a Cap'n Proto object. +// +// Deprecated: Using this type introduces an unnecessary allocation. +// Use Ptr instead. +type Pointer interface { + // Segment returns the segment this pointer points into. + // If nil, then this is an invalid pointer. + Segment() *Segment + + // HasData reports whether the object referenced by the pointer has + // non-zero size. + HasData() bool + + // underlying returns a Pointer that is one of a Struct, a List, or an + // Interface. + underlying() Pointer +} + +// IsValid reports whether p is valid. +// +// Deprecated: Use Ptr.IsValid instead. +func IsValid(p Pointer) bool { + return p != nil && p.Segment() != nil +} + +// HasData reports whether p has non-zero size. +// +// Deprecated: There are usually better ways to determine this +// information: length of a list, checking fields, or using HasFoo +// accessors. +func HasData(p Pointer) bool { + return IsValid(p) && p.HasData() +} + +// PointerDefault returns p if it is valid, otherwise it unmarshals def. +// +// Deprecated: Use Ptr.Default. +func PointerDefault(p Pointer, def []byte) (Pointer, error) { + pp, err := toPtr(p).Default(def) + return pp.toPointer(), err +} + +func unmarshalDefault(def []byte) (Ptr, error) { + msg, err := Unmarshal(def) + if err != nil { + return Ptr{}, err + } + p, err := msg.RootPtr() + if err != nil { + return Ptr{}, err + } + return p, nil +} + +type ptrFlags uint8 + +const interfacePtrFlag ptrFlags = interfacePtrType << 6 + +func structPtrFlag(f structFlags) ptrFlags { + return structPtrType<<6 | ptrFlags(f)&ptrLowerMask +} + +func listPtrFlag(f listFlags) ptrFlags { + return listPtrType<<6 | ptrFlags(f)&ptrLowerMask +} + +const ( + structPtrType = iota + listPtrType + interfacePtrType +) + +func (f ptrFlags) ptrType() int { + return int(f >> 6) +} + +const ptrLowerMask ptrFlags = 0x3f + +func (f ptrFlags) listFlags() listFlags { + return listFlags(f & ptrLowerMask) +} + +func (f ptrFlags) structFlags() structFlags { + return structFlags(f & ptrLowerMask) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/rawpointer.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/rawpointer.go new file mode 100644 index 00000000..b72e2c3d --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/rawpointer.go @@ -0,0 +1,189 @@ +package capnp + +// pointerOffset is an address offset in multiples of word size. +type pointerOffset int32 + +// resolve returns an absolute address relative to a base address. +// For near pointers, the base is the end of the near pointer. +// For far pointers, the base is zero (the beginning of the segment). +func (off pointerOffset) resolve(base Address) (_ Address, ok bool) { + if off == 0 { + return base, true + } + addr := base + Address(off*pointerOffset(wordSize)) + return addr, (addr > base || off < 0) && (addr < base || off > 0) +} + +// nearPointerOffset computes the offset for a pointer at paddr to point to addr. +func nearPointerOffset(paddr, addr Address) pointerOffset { + return pointerOffset(addr/Address(wordSize) - paddr/Address(wordSize) - 1) +} + +// rawPointer is an encoded pointer. +type rawPointer uint64 + +// rawStructPointer returns a struct pointer. The offset is from the +// end of the pointer to the start of the struct. 
+func rawStructPointer(off pointerOffset, sz ObjectSize) rawPointer { + return rawPointer(structPointer) | rawPointer(uint32(off)<<2) | rawPointer(sz.dataWordCount())<<32 | rawPointer(sz.PointerCount)<<48 +} + +// rawListPointer returns a list pointer. The offset is the number of +// words relative to the end of the pointer that the list starts. If +// listType is compositeList, then length is the number of words +// that the list occupies, otherwise it is the number of elements in +// the list. +func rawListPointer(off pointerOffset, listType listType, length int32) rawPointer { + return rawPointer(listPointer) | rawPointer(uint32(off)<<2) | rawPointer(listType)<<32 | rawPointer(length)<<35 +} + +// rawInterfacePointer returns an interface pointer that references +// a capability number. +func rawInterfacePointer(capability CapabilityID) rawPointer { + return rawPointer(otherPointer) | rawPointer(capability)<<32 +} + +// rawFarPointer returns a pointer to a pointer in another segment. +func rawFarPointer(segID SegmentID, off Address) rawPointer { + return rawPointer(farPointer) | rawPointer(off&^7) | (rawPointer(segID) << 32) +} + +// rawDoubleFarPointer returns a pointer to a pointer in another segment. +func rawDoubleFarPointer(segID SegmentID, off Address) rawPointer { + return rawPointer(doubleFarPointer) | rawPointer(off&^7) | (rawPointer(segID) << 32) +} + +// landingPadNearPointer converts a double-far pointer landing pad into +// a near pointer in the destination segment. Its offset will be +// relative to the beginning of the segment. tag must be either a +// struct or a list pointer. +func landingPadNearPointer(far, tag rawPointer) rawPointer { + // Replace tag's offset with far's offset. + // far's offset (29-bit unsigned) just needs to be shifted down to + // make it into a signed 30-bit value. + return tag&^0xfffffffc | rawPointer(uint32(far&^3)>>1) +} + +type pointerType int + +// Raw pointer types. +const ( + structPointer pointerType = 0 + listPointer pointerType = 1 + farPointer pointerType = 2 + doubleFarPointer pointerType = 6 + otherPointer pointerType = 3 +) + +func (p rawPointer) pointerType() pointerType { + t := pointerType(p & 3) + if t == farPointer { + return pointerType(p & 7) + } + return t +} + +func (p rawPointer) structSize() ObjectSize { + c := uint16(p >> 32) + d := uint16(p >> 48) + return ObjectSize{ + DataSize: Size(c) * wordSize, + PointerCount: d, + } +} + +type listType int + +// Raw list pointer types. +const ( + voidList listType = 0 + bit1List listType = 1 + byte1List listType = 2 + byte2List listType = 3 + byte4List listType = 4 + byte8List listType = 5 + pointerList listType = 6 + compositeList listType = 7 +) + +func (p rawPointer) listType() listType { + return listType((p >> 32) & 7) +} + +func (p rawPointer) numListElements() int32 { + return int32(p >> 35) +} + +// elementSize returns the size of an individual element in the list referenced by p. +func (p rawPointer) elementSize() ObjectSize { + switch p.listType() { + case voidList: + return ObjectSize{} + case bit1List: + // Size is ignored on bit lists. 
+ return ObjectSize{} + case byte1List: + return ObjectSize{DataSize: 1} + case byte2List: + return ObjectSize{DataSize: 2} + case byte4List: + return ObjectSize{DataSize: 4} + case byte8List: + return ObjectSize{DataSize: 8} + case pointerList: + return ObjectSize{PointerCount: 1} + default: + panic("elementSize not supposed to be called on composite or unknown list type") + } +} + +// totalListSize returns the total size of the list referenced by p. +func (p rawPointer) totalListSize() (sz Size, ok bool) { + n := p.numListElements() + switch p.listType() { + case voidList: + return 0, true + case bit1List: + return Size((n + 7) / 8), true + case compositeList: + // For a composite list, n represents the number of words (excluding the tag word). + return wordSize.times(n + 1) + default: + return p.elementSize().totalSize().times(n) + } +} + +// offset returns a pointer's offset. Only valid for struct or list +// pointers. +func (p rawPointer) offset() pointerOffset { + return pointerOffset(int32(p) >> 2) +} + +// withOffset replaces a pointer's offset. Only valid for struct or +// list pointers. +func (p rawPointer) withOffset(off pointerOffset) rawPointer { + return p&^0xfffffffc | rawPointer(uint32(off<<2)) +} + +// farAddress returns the address of the landing pad pointer. +func (p rawPointer) farAddress() Address { + // Far pointer offset is 29 bits, starting after the low 3 bits. + // It's an unsigned word offset, which would be equivalent to a + // logical left shift by 3. + return Address(p) &^ 7 +} + +// farSegment returns the segment ID that the far pointer references. +func (p rawPointer) farSegment() SegmentID { + return SegmentID(p >> 32) +} + +// otherPointerType returns the type of "other pointer" from p. +func (p rawPointer) otherPointerType() uint32 { + return uint32(p) >> 2 +} + +// capabilityIndex returns the index of the capability in the message's capability table. +func (p rawPointer) capabilityIndex() CapabilityID { + return CapabilityID(p >> 32) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/readlimit.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/readlimit.go new file mode 100644 index 00000000..1e1f9808 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/readlimit.go @@ -0,0 +1,38 @@ +package capnp + +import "sync/atomic" + +// A ReadLimiter tracks the number of bytes read from a message in order +// to avoid amplification attacks as detailed in +// https://capnproto.org/encoding.html#amplification-attack. +// It is safe to use from multiple goroutines. +type ReadLimiter struct { + limit uint64 +} + +// canRead reports whether the amount of bytes can be stored safely. +func (rl *ReadLimiter) canRead(sz Size) bool { + for { + curr := atomic.LoadUint64(&rl.limit) + ok := curr >= uint64(sz) + var new uint64 + if ok { + new = curr - uint64(sz) + } else { + new = 0 + } + if atomic.CompareAndSwapUint64(&rl.limit, curr, new) { + return ok + } + } +} + +// Reset sets the number of bytes allowed to be read. +func (rl *ReadLimiter) Reset(limit uint64) { + atomic.StoreUint64(&rl.limit, limit) +} + +// Unread increases the limit by sz. 
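+// As a worked illustration with arbitrary numbers: after Reset(1024),
+// a successful canRead(128) leaves a budget of 896 bytes, and a later
+// Unread(128) restores it to 1024.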
+func (rl *ReadLimiter) Unread(sz Size) { + atomic.AddUint64(&rl.limit, uint64(sz)) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/regen.sh b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/regen.sh new file mode 100644 index 00000000..a9c6cb14 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/regen.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# regen.sh - update capnpc-go and regenerate schemas +set -euo pipefail + +cd "$(dirname "$0")" + +echo "** mktemplates" +(cd internal/cmd/mktemplates && go build -tags=mktemplates) + +echo "** capnpc-go" +# Run tests so that we don't install a broken capnpc-go. +(cd capnpc-go && go generate && go test && go install) + +echo "** schemas" +(cd std/capnp; ./gen.sh compile) +capnp compile -ogo std/go.capnp && mv std/go.capnp.go ./ +go generate ./... diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/schemas/BUILD.bazel b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/schemas/BUILD.bazel new file mode 100644 index 00000000..b387aeaa --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/schemas/BUILD.bazel @@ -0,0 +1,18 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["schemas.go"], + visibility = ["//visibility:public"], + deps = ["//internal/packed:go_default_library"], +) + +go_test( + name = "go_default_xtest", + srcs = ["schemas_test.go"], + deps = [ + ":go_default_library", + "//:go_default_library", + "//internal/schema:go_default_library", + ], +) diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/schemas/schemas.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/schemas/schemas.go new file mode 100644 index 00000000..8da117e4 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/schemas/schemas.go @@ -0,0 +1,185 @@ +// Package schemas provides a container for Cap'n Proto reflection data. +// The code generated by capnpc-go will register its schema in the +// default registry (unless disabled at generation time). +// +// Most programs will use the default registry. However, a program +// could dynamically build up a registry, perhaps by invoking the capnp +// tool or querying a service. +package schemas + +import ( + "bufio" + "bytes" + "compress/zlib" + "errors" + "fmt" + "io" + "io/ioutil" + "strings" + "sync" + + "zombiezen.com/go/capnproto2/internal/packed" +) + +// A Schema is a collection of schema nodes parsed by the capnp tool. +type Schema struct { + // Either String or Bytes must be populated with a CodeGeneratorRequest + // message in the standard Cap'n Proto framing format. + String string + Bytes []byte + + // If true, the input is assumed to be zlib-compressed and packed. + Compressed bool + + // Node IDs that are contained in this schema. + Nodes []uint64 +} + +// A Registry is a mapping of IDs to schema blobs. It is safe to read +// from multiple goroutines. The zero value is an empty registry. +type Registry struct { + m map[uint64]*record +} + +// Register indexes a schema in the registry. It is an error to +// register schemas with overlapping IDs. 
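+//
+// A minimal registration sketch (the ID and the data variable here are
+// placeholders, not values from this package):
+//
+//	var reg Registry
+//	err := reg.Register(&Schema{
+//		Bytes: schemaData, // CodeGeneratorRequest in standard framing
+//		Nodes: []uint64{0xabcdef0123456789},
+//	})
+//	if err != nil {
+//		// overlapping IDs, or both String and Bytes were set
+//	}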
+func (reg *Registry) Register(s *Schema) error { + if len(s.String) > 0 && len(s.Bytes) > 0 { + return errors.New("schemas: schema should have only one of string or bytes") + } + r := &record{ + s: s.String, + data: s.Bytes, + compressed: s.Compressed, + } + if reg.m == nil { + reg.m = make(map[uint64]*record) + } + for _, id := range s.Nodes { + if _, dup := reg.m[id]; dup { + return &dupeError{id: id} + } + reg.m[id] = r + } + return nil +} + +// Find returns the CodeGeneratorRequest message for the given ID, +// suitable for capnp.Unmarshal. If the ID is not found, Find returns +// an error that can be identified with IsNotFound. The returned byte +// slice should not be modified. +func (reg *Registry) Find(id uint64) ([]byte, error) { + r := reg.m[id] + if r == nil { + return nil, ¬FoundError{id: id} + } + b, err := r.read() + if err != nil { + return nil, &decompressError{id, err} + } + return b, nil +} + +type record struct { + // All the fields are protected by once. + once sync.Once + s string // input + compressed bool + data []byte // input and result + err error // result +} + +func (r *record) read() ([]byte, error) { + r.once.Do(func() { + if !r.compressed { + if r.s != "" { + r.data = []byte(r.s) + r.s = "" + } + return + } + var in io.Reader + if r.s != "" { + in = strings.NewReader(r.s) + r.s = "" + } else { + in = bytes.NewReader(r.data) + } + z, err := zlib.NewReader(in) + if err != nil { + r.data, r.err = nil, err + return + } + p := packed.NewReader(bufio.NewReader(z)) + r.data, r.err = ioutil.ReadAll(p) + if err != nil { + r.data = nil + return + } + }) + return r.data, r.err +} + +// DefaultRegistry is the process-wide registry used by Register and Find. +var DefaultRegistry Registry + +// Register is called by generated code to associate a blob of zlib- +// compressed, packed Cap'n Proto data for a CodeGeneratorRequest with +// the IDs it contains. It should only be called during init(). +func Register(data string, ids ...uint64) { + err := DefaultRegistry.Register(&Schema{ + String: data, + Nodes: ids, + Compressed: true, + }) + if err != nil { + panic(err) + } +} + +// Find returns the CodeGeneratorRequest message for the given ID, +// suitable for capnp.Unmarshal, or nil if the ID was not found. +// It is safe to call Find from multiple goroutines, so the returned +// byte slice should not be modified. However, it is not safe to +// call Find concurrently with Register. +func Find(id uint64) []byte { + b, err := DefaultRegistry.Find(id) + if IsNotFound(err) { + return nil + } + if err != nil { + panic(err) + } + return b +} + +// IsNotFound reports whether e indicates a failure to find a schema. 
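+//
+// A typical lookup pattern (sketch; reg is any Registry value):
+//
+//	b, err := reg.Find(id)
+//	if IsNotFound(err) {
+//		// no schema registered for id
+//	} else if err != nil {
+//		// schema found but could not be decompressed
+//	}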
+func IsNotFound(e error) bool { + _, ok := e.(*notFoundError) + return ok +} + +type dupeError struct { + id uint64 +} + +func (e *dupeError) Error() string { + return fmt.Sprintf("schemas: registered @%#x twice", e.id) +} + +type notFoundError struct { + id uint64 +} + +func (e *notFoundError) Error() string { + return fmt.Sprintf("schemas: could not find @%#x", e.id) +} + +type decompressError struct { + id uint64 + err error +} + +func (e *decompressError) Error() string { + return fmt.Sprintf("schemas: decompressing schema for @%#x: %v", e.id, e.err) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/strings.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/strings.go new file mode 100644 index 00000000..a3f45b4c --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/strings.go @@ -0,0 +1,125 @@ +// +build !nocapnpstrings + +package capnp + +import ( + "fmt" +) + +// String returns the address in hex format. +func (addr Address) String() string { + return fmt.Sprintf("%#08x", uint64(addr)) +} + +// GoString returns the address in hex format. +func (addr Address) GoString() string { + return fmt.Sprintf("capnp.Address(%#08x)", uint64(addr)) +} + +// String returns the size in the format "X bytes". +func (sz Size) String() string { + if sz == 1 { + return "1 byte" + } + return fmt.Sprintf("%d bytes", sz) +} + +// GoString returns the size as a Go expression. +func (sz Size) GoString() string { + return fmt.Sprintf("capnp.Size(%d)", sz) +} + +// String returns the offset in the format "+X bytes". +func (off DataOffset) String() string { + if off == 1 { + return "+1 byte" + } + return fmt.Sprintf("+%d bytes", off) +} + +// GoString returns the offset as a Go expression. +func (off DataOffset) GoString() string { + return fmt.Sprintf("capnp.DataOffset(%d)", off) +} + +// String returns a short, human readable representation of the object +// size. +func (sz ObjectSize) String() string { + return fmt.Sprintf("{datasz=%d ptrs=%d}", sz.DataSize, sz.PointerCount) +} + +// GoString formats the ObjectSize as a keyed struct literal. +func (sz ObjectSize) GoString() string { + return fmt.Sprintf("capnp.ObjectSize{DataSize: %d, PointerCount: %d}", sz.DataSize, sz.PointerCount) +} + +// String returns the offset in the format "bit X". +func (bit BitOffset) String() string { + return fmt.Sprintf("bit %d", bit) +} + +// GoString returns the offset as a Go expression. +func (bit BitOffset) GoString() string { + return fmt.Sprintf("capnp.BitOffset(%d)", bit) +} + +// String returns the ID in the format "capability X". +func (id CapabilityID) String() string { + return fmt.Sprintf("capability %d", id) +} + +// GoString returns the ID as a Go expression. +func (id CapabilityID) GoString() string { + return fmt.Sprintf("capnp.CapabilityID(%d)", id) +} + +// GoString formats the pointer as a call to one of the rawPointer +// construction functions. 
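+// For example, a struct pointer with offset 0, one data word, and one
+// pointer should render as
+// "rawStructPointer(0, capnp.ObjectSize{DataSize: 8, PointerCount: 1})".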
+func (p rawPointer) GoString() string { + if p == 0 { + return "rawPointer(0)" + } + switch p.pointerType() { + case structPointer: + return fmt.Sprintf("rawStructPointer(%d, %#v)", p.offset(), p.structSize()) + case listPointer: + var lt string + switch p.listType() { + case voidList: + lt = "voidList" + case bit1List: + lt = "bit1List" + case byte1List: + lt = "byte1List" + case byte2List: + lt = "byte2List" + case byte4List: + lt = "byte4List" + case byte8List: + lt = "byte8List" + case pointerList: + lt = "pointerList" + case compositeList: + lt = "compositeList" + } + return fmt.Sprintf("rawListPointer(%d, %s, %d)", p.offset(), lt, p.numListElements()) + case farPointer: + return fmt.Sprintf("rawFarPointer(%d, %v)", p.farSegment(), p.farAddress()) + case doubleFarPointer: + return fmt.Sprintf("rawDoubleFarPointer(%d, %v)", p.farSegment(), p.farAddress()) + default: + // other pointer + if p.otherPointerType() != 0 { + return fmt.Sprintf("rawPointer(%#016x)", uint64(p)) + } + return fmt.Sprintf("rawInterfacePointer(%d)", p.capabilityIndex()) + } +} + +func (ssa *singleSegmentArena) String() string { + return fmt.Sprintf("single-segment arena [len=%d cap=%d]", len(*ssa), cap(*ssa)) +} + +func (msa *multiSegmentArena) String() string { + return fmt.Sprintf("multi-segment arena [%d segments]", len(*msa)) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/struct.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/struct.go new file mode 100644 index 00000000..92148253 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/zombiezen.com/go/capnproto2/struct.go @@ -0,0 +1,368 @@ +package capnp + +// Struct is a pointer to a struct. +type Struct struct { + seg *Segment + off Address + size ObjectSize + depthLimit uint + flags structFlags +} + +// NewStruct creates a new struct, preferring placement in s. +func NewStruct(s *Segment, sz ObjectSize) (Struct, error) { + if !sz.isValid() { + return Struct{}, errObjectSize + } + sz.DataSize = sz.DataSize.padToWord() + seg, addr, err := alloc(s, sz.totalSize()) + if err != nil { + return Struct{}, err + } + return Struct{ + seg: seg, + off: addr, + size: sz, + depthLimit: maxDepth, + }, nil +} + +// NewRootStruct creates a new struct, preferring placement in s, then sets the +// message's root to the new struct. +func NewRootStruct(s *Segment, sz ObjectSize) (Struct, error) { + st, err := NewStruct(s, sz) + if err != nil { + return st, err + } + if err := s.msg.SetRootPtr(st.ToPtr()); err != nil { + return st, err + } + return st, nil +} + +// ToStruct converts p to a Struct. +// +// Deprecated: Use Ptr.Struct. +func ToStruct(p Pointer) Struct { + if !IsValid(p) { + return Struct{} + } + s, ok := p.underlying().(Struct) + if !ok { + return Struct{} + } + return s +} + +// ToStructDefault attempts to convert p into a struct, reading the +// default value from def if p is not a struct. +// +// Deprecated: Use Ptr.StructDefault. +func ToStructDefault(p Pointer, def []byte) (Struct, error) { + return toPtr(p).StructDefault(def) +} + +// ToPtr converts the struct to a generic pointer. +func (p Struct) ToPtr() Ptr { + return Ptr{ + seg: p.seg, + off: p.off, + size: p.size, + depthLimit: p.depthLimit, + flags: structPtrFlag(p.flags), + } +} + +// Segment returns the segment this pointer came from. +func (p Struct) Segment() *Segment { + return p.seg +} + +// IsValid returns whether the struct is valid. 
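+// The zero Struct is invalid: its getters return zero values (see the
+// seg == nil checks below), while its setters panic with errOutOfBounds.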
+func (p Struct) IsValid() bool { + return p.seg != nil +} + +// Address returns the address the pointer references. +// +// Deprecated: The return value is not well-defined. Use SamePtr if you +// need to check whether two pointers refer to the same object. +func (p Struct) Address() Address { + return p.off +} + +// Size returns the size of the struct. +func (p Struct) Size() ObjectSize { + return p.size +} + +// HasData reports whether the struct has a non-zero size. +func (p Struct) HasData() bool { + return !p.size.isZero() +} + +// readSize returns the struct's size for the purposes of read limit +// accounting. +func (p Struct) readSize() Size { + if p.seg == nil { + return 0 + } + return p.size.totalSize() +} + +func (p Struct) underlying() Pointer { + return p +} + +// Pointer returns the i'th pointer in the struct. +// +// Deprecated: Use Ptr. +func (p Struct) Pointer(i uint16) (Pointer, error) { + pp, err := p.Ptr(i) + return pp.toPointer(), err +} + +// Ptr returns the i'th pointer in the struct. +func (p Struct) Ptr(i uint16) (Ptr, error) { + if p.seg == nil || i >= p.size.PointerCount { + return Ptr{}, nil + } + return p.seg.readPtr(p.pointerAddress(i), p.depthLimit) +} + +// SetPointer sets the i'th pointer in the struct to src. +// +// Deprecated: Use SetPtr. +func (p Struct) SetPointer(i uint16, src Pointer) error { + return p.SetPtr(i, toPtr(src)) +} + +// SetPtr sets the i'th pointer in the struct to src. +func (p Struct) SetPtr(i uint16, src Ptr) error { + if p.seg == nil || i >= p.size.PointerCount { + panic(errOutOfBounds) + } + return p.seg.writePtr(p.pointerAddress(i), src, false) +} + +// SetText sets the i'th pointer to a newly allocated text or null if v is empty. +func (p Struct) SetText(i uint16, v string) error { + if v == "" { + return p.SetPtr(i, Ptr{}) + } + return p.SetNewText(i, v) +} + +// SetNewText sets the i'th pointer to a newly allocated text. +func (p Struct) SetNewText(i uint16, v string) error { + t, err := NewText(p.seg, v) + if err != nil { + return err + } + return p.SetPtr(i, t.List.ToPtr()) +} + +// SetTextFromBytes sets the i'th pointer to a newly allocated text or null if v is nil. +func (p Struct) SetTextFromBytes(i uint16, v []byte) error { + if v == nil { + return p.SetPtr(i, Ptr{}) + } + t, err := NewTextFromBytes(p.seg, v) + if err != nil { + return err + } + return p.SetPtr(i, t.List.ToPtr()) +} + +// SetData sets the i'th pointer to a newly allocated data or null if v is nil. +func (p Struct) SetData(i uint16, v []byte) error { + if v == nil { + return p.SetPtr(i, Ptr{}) + } + d, err := NewData(p.seg, v) + if err != nil { + return err + } + return p.SetPtr(i, d.List.ToPtr()) +} + +func (p Struct) pointerAddress(i uint16) Address { + // Struct already had bounds check + ptrStart, _ := p.off.addSize(p.size.DataSize) + a, _ := ptrStart.element(int32(i), wordSize) + return a +} + +// bitInData reports whether bit is inside p's data section. +func (p Struct) bitInData(bit BitOffset) bool { + return p.seg != nil && bit < BitOffset(p.size.DataSize*8) +} + +// Bit returns the bit that is n bits from the start of the struct. +func (p Struct) Bit(n BitOffset) bool { + if !p.bitInData(n) { + return false + } + addr := p.off.addOffset(n.offset()) + return p.seg.readUint8(addr)&n.mask() != 0 +} + +// SetBit sets the bit that is n bits from the start of the struct to v. 
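+// Assuming the usual split used by BitOffset elsewhere in this package
+// (bit n lives in byte n/8 under mask 1<<(n%8)), SetBit(9, true) would,
+// for instance, OR 0x02 into byte 1 of the data section.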
+func (p Struct) SetBit(n BitOffset, v bool) { + if !p.bitInData(n) { + panic(errOutOfBounds) + } + addr := p.off.addOffset(n.offset()) + b := p.seg.readUint8(addr) + if v { + b |= n.mask() + } else { + b &^= n.mask() + } + p.seg.writeUint8(addr, b) +} + +func (p Struct) dataAddress(off DataOffset, sz Size) (addr Address, ok bool) { + if p.seg == nil || Size(off)+sz > p.size.DataSize { + return 0, false + } + return p.off.addOffset(off), true +} + +// Uint8 returns an 8-bit integer from the struct's data section. +func (p Struct) Uint8(off DataOffset) uint8 { + addr, ok := p.dataAddress(off, 1) + if !ok { + return 0 + } + return p.seg.readUint8(addr) +} + +// Uint16 returns a 16-bit integer from the struct's data section. +func (p Struct) Uint16(off DataOffset) uint16 { + addr, ok := p.dataAddress(off, 2) + if !ok { + return 0 + } + return p.seg.readUint16(addr) +} + +// Uint32 returns a 32-bit integer from the struct's data section. +func (p Struct) Uint32(off DataOffset) uint32 { + addr, ok := p.dataAddress(off, 4) + if !ok { + return 0 + } + return p.seg.readUint32(addr) +} + +// Uint64 returns a 64-bit integer from the struct's data section. +func (p Struct) Uint64(off DataOffset) uint64 { + addr, ok := p.dataAddress(off, 8) + if !ok { + return 0 + } + return p.seg.readUint64(addr) +} + +// SetUint8 sets the 8-bit integer that is off bytes from the start of the struct to v. +func (p Struct) SetUint8(off DataOffset, v uint8) { + addr, ok := p.dataAddress(off, 1) + if !ok { + panic(errOutOfBounds) + } + p.seg.writeUint8(addr, v) +} + +// SetUint16 sets the 16-bit integer that is off bytes from the start of the struct to v. +func (p Struct) SetUint16(off DataOffset, v uint16) { + addr, ok := p.dataAddress(off, 2) + if !ok { + panic(errOutOfBounds) + } + p.seg.writeUint16(addr, v) +} + +// SetUint32 sets the 32-bit integer that is off bytes from the start of the struct to v. +func (p Struct) SetUint32(off DataOffset, v uint32) { + addr, ok := p.dataAddress(off, 4) + if !ok { + panic(errOutOfBounds) + } + p.seg.writeUint32(addr, v) +} + +// SetUint64 sets the 64-bit integer that is off bytes from the start of the struct to v. +func (p Struct) SetUint64(off DataOffset, v uint64) { + addr, ok := p.dataAddress(off, 8) + if !ok { + panic(errOutOfBounds) + } + p.seg.writeUint64(addr, v) +} + +// structFlags is a bitmask of flags for a pointer. +type structFlags uint8 + +// Pointer flags. +const ( + isListMember structFlags = 1 << iota +) + +// copyStruct makes a deep copy of src into dst. +func copyStruct(dst, src Struct) error { + if dst.seg == nil { + return nil + } + + // Q: how does version handling happen here, when the + // destination toData[] slice can be bigger or smaller + // than the source data slice, which is in + // src.seg.Data[src.off:src.off+src.size.DataSize] ? + // + // A: Newer fields only come *after* old fields. Note that + // copy only copies min(len(src), len(dst)) size, + // and then we manually zero the rest in the for loop + // that writes toData[j] = 0. + // + + // data section: + srcData := src.seg.slice(src.off, src.size.DataSize) + dstData := dst.seg.slice(dst.off, dst.size.DataSize) + copyCount := copy(dstData, srcData) + dstData = dstData[copyCount:] + for j := range dstData { + dstData[j] = 0 + } + + // ptrs section: + + // version handling: we ignore any extra-newer-pointers in src, + // i.e. the case when srcPtrSize > dstPtrSize, by only + // running j over the size of dstPtrSize, the destination size. 
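+	// For instance, copying a struct that has three pointers into one
+	// sized for a single pointer copies only pointer 0; copying it into
+	// one sized for five pointers copies all three and zeroes the last two.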
+	srcPtrSect, _ := src.off.addSize(src.size.DataSize)
+	dstPtrSect, _ := dst.off.addSize(dst.size.DataSize)
+	numSrcPtrs := src.size.PointerCount
+	numDstPtrs := dst.size.PointerCount
+	for j := uint16(0); j < numSrcPtrs && j < numDstPtrs; j++ {
+		srcAddr, _ := srcPtrSect.element(int32(j), wordSize)
+		dstAddr, _ := dstPtrSect.element(int32(j), wordSize)
+		m, err := src.seg.readPtr(srcAddr, src.depthLimit)
+		if err != nil {
+			return err
+		}
+		err = dst.seg.writePtr(dstAddr, m, true)
+		if err != nil {
+			return err
+		}
+	}
+	for j := numSrcPtrs; j < numDstPtrs; j++ {
+		// dst is a newer version than src, so these extra pointer
+		// fields in dst have no counterpart in src and must be zeroed.
+		addr, _ := dstPtrSect.element(int32(j), wordSize)
+		dst.seg.writeRawPointer(addr, 0)
+	}
+	// Nothing more to do: any pointers in src beyond dst's pointer
+	// count are ignored and discarded.
+
+	return nil
+}