From 6e7b6e9a6efa3618d09af01a95ce285b32e2398c Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 11 Apr 2024 19:15:26 +0200 Subject: [PATCH] vectorized aggregation as separate plan node (#6784) This PR is a little too big, but it proved difficult to split into parts because they are all dependent. * Move the vectorized aggregation into a separate plan node, which simplifies working with targetlist in DecompressChunk node. * Add a post-planning hook that replaces the normal partial aggregation node with the vectorized aggregation node. The advantage of this compared to planning on Path stage is that we know which columns support bulk decompression and which filters are vectorized. * Use the compressed batch API in vectorized aggregation. This simplifies the code. * Support vectorized aggregation after vectorized filters. * Add a simple generic interface for vectorized aggregate functions. For now the only function is still `sum(int4)`. * The parallel plans are now used more often, maybe because the old code didn't add costs for aggregation and just used the costs from DecompressChunk, so the costs of parallel plans were less different. The current code does the cost-based planning for normal aggregates, and then after planning replaces them with vectorized, so now we basically follow the plan choice that Postgres makes for the usual aggregation. --- src/cross_module_fn.c | 8 +- src/cross_module_fn.h | 2 +- src/import/CMakeLists.txt | 1 + src/import/list.c | 88 + src/import/list.h | 17 + src/nodes/chunk_append/planner.c | 33 +- src/nodes/vector_agg.h | 13 + src/planner/partialize.c | 18 +- src/planner/planner.c | 2 + tsl/src/CMakeLists.txt | 1 - tsl/src/compression/array.c | 25 +- tsl/src/compression/arrow_c_data_interface.h | 19 + tsl/src/compression/deltadelta_impl.c | 33 +- tsl/src/compression/dictionary.c | 25 +- tsl/src/compression/gorilla_impl.c | 32 +- tsl/src/init.c | 5 +- tsl/src/nodes/CMakeLists.txt | 1 + .../nodes/decompress_chunk/decompress_chunk.h | 52 - tsl/src/nodes/decompress_chunk/exec.c | 340 +-- tsl/src/nodes/decompress_chunk/exec.h | 8 +- tsl/src/nodes/decompress_chunk/planner.c | 221 +- tsl/src/nodes/decompress_chunk/planner.h | 23 +- tsl/src/nodes/vector_agg/CMakeLists.txt | 4 + tsl/src/nodes/vector_agg/exec.c | 201 ++ .../vector_agg/exec.h} | 13 +- tsl/src/nodes/vector_agg/functions.c | 139 + tsl/src/nodes/vector_agg/functions.h | 27 + tsl/src/nodes/vector_agg/plan.c | 352 +++ tsl/src/nodes/vector_agg/plan.h | 18 + tsl/src/partialize_agg.c | 120 - tsl/src/planner.c | 13 + tsl/src/planner.h | 1 + tsl/test/expected/decompress_vector_qual.out | 8 +- tsl/test/expected/vector_agg_default.out | 167 ++ tsl/test/expected/vector_agg_param.out | 50 + tsl/test/expected/vectorized_aggregation.out | 2294 +++++++++-------- tsl/test/sql/CMakeLists.txt | 4 +- tsl/test/sql/decompress_vector_qual.sql | 8 +- tsl/test/sql/vector_agg_default.sql | 50 + tsl/test/sql/vector_agg_param.sql | 28 + tsl/test/sql/vectorized_aggregation.sql | 4 +- 41 files changed, 2709 insertions(+), 1759 deletions(-) create mode 100644 src/import/list.c create mode 100644 src/import/list.h create mode 100644 src/nodes/vector_agg.h create mode 100644 tsl/src/nodes/vector_agg/CMakeLists.txt create mode 100644 tsl/src/nodes/vector_agg/exec.c rename tsl/src/{partialize_agg.h => nodes/vector_agg/exec.h} (52%) create mode 100644 tsl/src/nodes/vector_agg/functions.c create mode 100644 tsl/src/nodes/vector_agg/functions.h create mode 100644 
tsl/src/nodes/vector_agg/plan.c create mode 100644 tsl/src/nodes/vector_agg/plan.h delete mode 100644 tsl/src/partialize_agg.c create mode 100644 tsl/test/expected/vector_agg_default.out create mode 100644 tsl/test/expected/vector_agg_param.out create mode 100644 tsl/test/sql/vector_agg_default.sql create mode 100644 tsl/test/sql/vector_agg_param.sql diff --git a/src/cross_module_fn.c b/src/cross_module_fn.c index 8482b901dbe..3d7e4c8319e 100644 --- a/src/cross_module_fn.c +++ b/src/cross_module_fn.c @@ -124,11 +124,9 @@ job_execute_default_fn(BgwJob *job) pg_unreachable(); } -static bool -push_down_aggregation(PlannerInfo *root, AggPath *aggregation_path, Path *subpath) +static void +tsl_postprocess_plan_stub(PlannedStmt *stmt) { - /* Don't skip adding the agg node on top of the path */ - return false; } static bool @@ -322,7 +320,7 @@ TSDLLEXPORT CrossModuleFunctions ts_cm_functions_default = { .policies_alter = error_no_default_fn_pg_community, .policies_show = error_no_default_fn_pg_community, - .push_down_aggregation = push_down_aggregation, + .tsl_postprocess_plan = tsl_postprocess_plan_stub, .partialize_agg = error_no_default_fn_pg_community, .finalize_agg_sfunc = error_no_default_fn_pg_community, diff --git a/src/cross_module_fn.h b/src/cross_module_fn.h index c443bcab1c3..010bc990322 100644 --- a/src/cross_module_fn.h +++ b/src/cross_module_fn.h @@ -90,7 +90,7 @@ typedef struct CrossModuleFunctions PGFunction move_chunk; /* Vectorized queries */ - bool (*push_down_aggregation)(PlannerInfo *root, AggPath *aggregation_path, Path *subpath); + void (*tsl_postprocess_plan)(PlannedStmt *stmt); /* Continuous Aggregates */ PGFunction partialize_agg; diff --git a/src/import/CMakeLists.txt b/src/import/CMakeLists.txt index f6e08a0c6a4..9d837feb045 100644 --- a/src/import/CMakeLists.txt +++ b/src/import/CMakeLists.txt @@ -1,6 +1,7 @@ set(SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/allpaths.c ${CMAKE_CURRENT_SOURCE_DIR}/ht_hypertable_modify.c + ${CMAKE_CURRENT_SOURCE_DIR}/list.c ${CMAKE_CURRENT_SOURCE_DIR}/planner.c ${CMAKE_CURRENT_SOURCE_DIR}/ts_explain.c) diff --git a/src/import/list.c b/src/import/list.c new file mode 100644 index 00000000000..fb420d89a46 --- /dev/null +++ b/src/import/list.c @@ -0,0 +1,88 @@ +/* + * This file and its contents are licensed under the Apache License 2.0. + * Please see the included NOTICE for copyright information and + * LICENSE-APACHE for a copy of the license. + */ + +#include + +#include +#include + +#include "import/list.h" + +/* + * This file contains source code that was copied and/or modified from + * the PostgreSQL database, which is licensed under the open-source + * PostgreSQL License. Please see the NOTICE at the top level + * directory for a copy of the PostgreSQL License. + * + * Copied from PostgreSQL 15.0 (2a7ce2e2ce474504a707ec03e128fde66cfb8b48) + * without modifications. + */ + +/* Overhead for the fixed part of a List header, measured in ListCells */ +#define LIST_HEADER_OVERHEAD ((int) ((offsetof(List, initial_elements) - 1) / sizeof(ListCell) + 1)) + +/* + * Return a freshly allocated List with room for at least min_size cells. + * + * Since empty non-NIL lists are invalid, new_list() sets the initial length + * to min_size, effectively marking that number of cells as valid; the caller + * is responsible for filling in their data. 
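+ * + * A typical use, as in decompress_chunk_plan_create() later in this patch, is + * to allocate a list of known length up front and then fill the cells by index: + * List *settings = ts_new_list(T_IntList, DCS_Count); + * lfirst_int(list_nth_cell(settings, DCS_Reverse)) = dcpath->reverse;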
+ */ +List * +ts_new_list(NodeTag type, int min_size) +{ + List *newlist; + int max_size; + + Assert(min_size > 0); + + /* + * We allocate all the requested cells, and possibly some more, as part of + * the same palloc request as the List header. This is a big win for the + * typical case of short fixed-length lists. It can lose if we allocate a + * moderately long list and then it gets extended; we'll be wasting more + * initial_elements[] space than if we'd made the header small. However, + * rounding up the request as we do in the normal code path provides some + * defense against small extensions. + */ + +#ifndef DEBUG_LIST_MEMORY_USAGE + + /* + * Normally, we set up a list with some extra cells, to allow it to grow + * without a repalloc. Prefer cell counts chosen to make the total + * allocation a power-of-2, since palloc would round it up to that anyway. + * (That stops being true for very large allocations, but very long lists + * are infrequent, so it doesn't seem worth special logic for such cases.) + * + * The minimum allocation is 8 ListCell units, providing either 4 or 5 + * available ListCells depending on the machine's word width. Counting + * palloc's overhead, this uses the same amount of space as a one-cell + * list did in the old implementation, and less space for any longer list. + * + * We needn't worry about integer overflow; no caller passes min_size + * that's more than twice the size of an existing list, so the size limits + * within palloc will ensure that we don't overflow here. + */ + max_size = pg_nextpower2_32(Max(8, min_size + LIST_HEADER_OVERHEAD)); + max_size -= LIST_HEADER_OVERHEAD; +#else + + /* + * For debugging, don't allow any extra space. This forces any cell + * addition to go through enlarge_list() and thus move the existing data. + */ + max_size = min_size; +#endif + + newlist = (List *) palloc(offsetof(List, initial_elements) + max_size * sizeof(ListCell)); + newlist->type = type; + newlist->length = min_size; + newlist->max_length = max_size; + newlist->elements = newlist->initial_elements; + + return newlist; +} diff --git a/src/import/list.h b/src/import/list.h new file mode 100644 index 00000000000..28b45d67be2 --- /dev/null +++ b/src/import/list.h @@ -0,0 +1,17 @@ +/* + * This file and its contents are licensed under the Apache License 2.0. + * Please see the included NOTICE for copyright information and + * LICENSE-APACHE for a copy of the license. + */ +#pragma once + +#include "export.h" + +/* + * This file contains source code that was copied and/or modified from + * the PostgreSQL database, which is licensed under the open-source + * PostgreSQL License. Please see the NOTICE at the top level + * directory for a copy of the PostgreSQL License. + */ + +extern TSDLLEXPORT List *ts_new_list(NodeTag type, int min_size); diff --git a/src/nodes/chunk_append/planner.c b/src/nodes/chunk_append/planner.c index 8d00a47439e..500b25c245a 100644 --- a/src/nodes/chunk_append/planner.c +++ b/src/nodes/chunk_append/planner.c @@ -23,6 +23,7 @@ #include "nodes/chunk_append/chunk_append.h" #include "nodes/chunk_append/transform.h" #include "nodes/hypertable_modify.h" +#include "nodes/vector_agg.h" #include "import/planner.h" #include "guc.h" @@ -404,11 +405,35 @@ ts_chunk_append_get_scan_plan(Plan *plan) return (Scan *) plan; break; case T_CustomScan: - if (castNode(CustomScan, plan)->scan.scanrelid > 0) + { + CustomScan *custom = castNode(CustomScan, plan); + if (custom->scan.scanrelid > 0) + { + /* + * The custom plan node is a scan itself. 
This handles the + * DecompressChunk node. + */ return (Scan *) plan; - else - return NULL; - break; + } + + if (strcmp(custom->methods->CustomName, VECTOR_AGG_NODE_NAME) == 0) + { + /* + * This is a vectorized aggregation node; we have to recurse + * into its child, similar to the normal aggregation node. + * + * Unfortunately we have to hardcode the node name here, because + * we can't depend on the TSL library. + */ + return ts_chunk_append_get_scan_plan(linitial(custom->custom_plans)); + } + + /* + * This is some other unknown custom scan node; we can't recurse + * into it. + */ + return NULL; + } case T_Agg: if (plan->lefttree != NULL) { diff --git a/src/nodes/vector_agg.h b/src/nodes/vector_agg.h new file mode 100644 index 00000000000..e9da1f8a21f --- /dev/null +++ b/src/nodes/vector_agg.h @@ -0,0 +1,13 @@ +/* + * This file and its contents are licensed under the Apache License 2.0. + * Please see the included NOTICE for copyright information and + * LICENSE-APACHE for a copy of the license. + */ +#pragma once + +/* + * This file defines the node name of the Vector Aggregation custom node, to be + * used in the Apache part of the Timescale extension. The node itself is in + * the TSL part. + */ +#define VECTOR_AGG_NODE_NAME "VectorAgg" diff --git a/src/planner/partialize.c b/src/planner/partialize.c index e22fc2af134..b254ab4a9d8 100644 --- a/src/planner/partialize.c +++ b/src/planner/partialize.c @@ -421,14 +421,7 @@ add_partially_aggregated_subpaths(PlannerInfo *root, Path *parent_path, AggPath *agg_path = create_sorted_partial_agg_path(root, subpath, chunktarget, d_num_groups, extra_data); - if (ts_cm_functions->push_down_aggregation(root, agg_path, subpath)) - { - *sorted_paths = lappend(*sorted_paths, subpath); - } - else - { - *sorted_paths = lappend(*sorted_paths, (Path *) agg_path); - } + *sorted_paths = lappend(*sorted_paths, (Path *) agg_path); } if (can_hash) @@ -436,14 +429,7 @@ { AggPath *agg_path = create_hashed_partial_agg_path(root, subpath, chunktarget, d_num_groups, extra_data); - if (ts_cm_functions->push_down_aggregation(root, agg_path, subpath)) - { - *hashed_paths = lappend(*hashed_paths, subpath); - } - else - { - *hashed_paths = lappend(*hashed_paths, (Path *) agg_path); - } + *hashed_paths = lappend(*hashed_paths, (Path *) agg_path); } } diff --git a/src/planner/planner.c b/src/planner/planner.c index 43a1ca6ff1c..ca630d983d9 100644 --- a/src/planner/planner.c +++ b/src/planner/planner.c @@ -580,6 +580,8 @@ timescaledb_planner(Query *parse, const char *query_string, int cursor_opts, AGGSPLITOP_SERIALIZE | AGGSPLITOP_SKIPFINAL; } } + + ts_cm_functions->tsl_postprocess_plan(stmt); } if (reset_baserel_info) diff --git a/tsl/src/CMakeLists.txt b/tsl/src/CMakeLists.txt index 4752f839506..4029c421599 100644 --- a/tsl/src/CMakeLists.txt +++ b/tsl/src/CMakeLists.txt @@ -2,7 +2,6 @@ set(SOURCES chunk_api.c chunk.c init.c - partialize_agg.c partialize_finalize.c planner.c process_utility.c diff --git a/tsl/src/compression/array.c b/tsl/src/compression/array.c index c18029ace95..3c658382abd 100644 --- a/tsl/src/compression/array.c +++ b/tsl/src/compression/array.c @@ -571,7 +571,20 @@ text_array_decompress_all_serialized_no_header(StringInfo si, bool has_nulls, const int validity_bitmap_bytes = sizeof(uint64) * (pad_to_multiple(64, n_total) / 64); uint64 *restrict validity_bitmap = MemoryContextAlloc(dest_mctx, validity_bitmap_bytes); + + /* + * First, mark all data as valid, we will fill the nulls 
later if needed. + * Note that the validity bitmap size is a multiple of 64 bits. We have to + * fill the tail bits with zeros, because the corresponding elements are not + * valid. + * + */ memset(validity_bitmap, 0xFF, validity_bitmap_bytes); + if (n_total % 64) + { + const uint64 tail_mask = -1ULL >> (64 - n_total % 64); + validity_bitmap[n_total / 64] &= tail_mask; + } if (has_nulls) { @@ -613,18 +626,6 @@ text_array_decompress_all_serialized_no_header(StringInfo si, bool has_nulls, Assert(current_notnull_element == -1); } - else - { - /* - * The validity bitmap size is a multiple of 64 bits. Fill the tail bits - * with zeros, because the corresponding elements are not valid. - */ - if (n_total % 64) - { - const uint64 tail_mask = -1ULL >> (64 - n_total % 64); - validity_bitmap[n_total / 64] &= tail_mask; - } - } ArrowArray *result = MemoryContextAllocZero(dest_mctx, sizeof(ArrowArray) + sizeof(void *) * 3); const void **buffers = (const void **) &result[1]; diff --git a/tsl/src/compression/arrow_c_data_interface.h b/tsl/src/compression/arrow_c_data_interface.h index 64393c91b60..5a34870dff0 100644 --- a/tsl/src/compression/arrow_c_data_interface.h +++ b/tsl/src/compression/arrow_c_data_interface.h @@ -160,3 +160,22 @@ pad_to_multiple(uint64 pad_to, uint64 source_value) { return ((source_value + pad_to - 1) / pad_to) * pad_to; } + +static inline size_t +arrow_num_valid(uint64 *bitmap, size_t total_rows) +{ + uint64 num_valid = 0; +#ifdef HAVE__BUILTIN_POPCOUNT + const uint64 words = pad_to_multiple(64, total_rows) / 64; + for (uint64 i = 0; i < words; i++) + { + num_valid += __builtin_popcountll(bitmap[i]); + } +#else + for (size_t i = 0; i < total_rows; i++) + { + num_valid += arrow_row_is_valid(bitmap, i); + } +#endif + return num_valid; +} diff --git a/tsl/src/compression/deltadelta_impl.c b/tsl/src/compression/deltadelta_impl.c index 0dac02dd653..2673570134e 100644 --- a/tsl/src/compression/deltadelta_impl.c +++ b/tsl/src/compression/deltadelta_impl.c @@ -91,8 +91,19 @@ FUNCTION_NAME(delta_delta_decompress_all, ELEMENT_TYPE)(Datum compressed, Memory } #undef INNER_LOOP_SIZE - /* All data valid by default, we will fill in the nulls later. */ + /* + * First, mark all data as valid, we will fill the nulls later if needed. + * Note that the validity bitmap size is a multiple of 64 bits. We have to + * fill the tail bits with zeros, because the corresponding elements are not + * valid. + * + */ memset(validity_bitmap, 0xFF, validity_bitmap_bytes); + if (n_total % 64) + { + const uint64 tail_mask = -1ULL >> (64 - n_total % 64); + validity_bitmap[n_total / 64] &= tail_mask; + } /* Now move the data to account for nulls, and fill the validity bitmap. */ if (has_nulls) @@ -122,26 +133,6 @@ FUNCTION_NAME(delta_delta_decompress_all, ELEMENT_TYPE)(Datum compressed, Memory Assert(current_notnull_element == -1); } - else - { - /* - * The validity bitmap size is a multiple of 64 bits. Fill the tail bits - * with zeros, because the corresponding elements are not valid. - */ - if (n_total % 64) - { - const uint64 tail_mask = -1ULL >> (64 - n_total % 64); - validity_bitmap[n_total / 64] &= tail_mask; - -#ifdef USE_ASSERT_CHECKING - for (uint32 i = 0; i < 64; i++) - { - Assert(arrow_row_is_valid(validity_bitmap, (n_total / 64) * 64 + i) == - (i < n_total % 64)); - } -#endif - } - } /* Return the result. 
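 * The ArrowArray built below follows the Arrow C data interface: buffers[0] * is the validity bitmap filled above and buffers[1] holds the decompressed * values, hence the sizeof(void *) * 2 in the allocation.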
*/ ArrowArray *result = MemoryContextAllocZero(dest_mctx, sizeof(ArrowArray) + sizeof(void *) * 2); diff --git a/tsl/src/compression/dictionary.c b/tsl/src/compression/dictionary.c index b5428818fe4..12997778fd7 100644 --- a/tsl/src/compression/dictionary.c +++ b/tsl/src/compression/dictionary.c @@ -454,7 +454,20 @@ tsl_text_dictionary_decompress_all(Datum compressed, Oid element_type, MemoryCon /* Fill validity and indices of the array elements, reshuffling for nulls if needed. */ const int validity_bitmap_bytes = sizeof(uint64) * pad_to_multiple(64, n_total) / 64; uint64 *restrict validity_bitmap = MemoryContextAlloc(dest_mctx, validity_bitmap_bytes); + + /* + * First, mark all data as valid, we will fill the nulls later if needed. + * Note that the validity bitmap size is a multiple of 64 bits. We have to + * fill the tail bits with zeros, because the corresponding elements are not + * valid. + * + */ memset(validity_bitmap, 0xFF, validity_bitmap_bytes); + if (n_total % 64) + { + const uint64 tail_mask = -1ULL >> (64 - n_total % 64); + validity_bitmap[n_total / 64] &= tail_mask; + } if (header->has_nulls) { @@ -485,18 +498,6 @@ tsl_text_dictionary_decompress_all(Datum compressed, Oid element_type, MemoryCon Assert(current_notnull_element == -1); } - else - { - /* - * The validity bitmap size is a multiple of 64 bits. Fill the tail bits - * with zeros, because the corresponding elements are not valid. - */ - if (n_total % 64) - { - const uint64 tail_mask = -1ULL >> (64 - n_total % 64); - validity_bitmap[n_total / 64] &= tail_mask; - } - } ArrowArray *result = MemoryContextAllocZero(dest_mctx, sizeof(ArrowArray) + sizeof(void *) * 2); const void **buffers = (const void **) &result[1]; diff --git a/tsl/src/compression/gorilla_impl.c b/tsl/src/compression/gorilla_impl.c index 2cc940fd034..4ea4998a50b 100644 --- a/tsl/src/compression/gorilla_impl.c +++ b/tsl/src/compression/gorilla_impl.c @@ -136,10 +136,18 @@ FUNCTION_NAME(gorilla_decompress_all, ELEMENT_TYPE)(CompressedGorillaData *goril uint64 *restrict validity_bitmap = MemoryContextAlloc(dest_mctx, validity_bitmap_bytes); /* - * For starters, set the validity bitmap to all ones. We probably have less - * nulls than values, so this is faster. + * First, mark all data as valid, we will fill the nulls later if needed. + * Note that the validity bitmap size is a multiple of 64 bits. We have to + * fill the tail bits with zeros, because the corresponding elements are not + * valid. + * */ memset(validity_bitmap, 0xFF, validity_bitmap_bytes); + if (n_total % 64) + { + const uint64 tail_mask = -1ULL >> (64 - n_total % 64); + validity_bitmap[n_total / 64] &= tail_mask; + } if (has_nulls) { @@ -169,26 +177,6 @@ FUNCTION_NAME(gorilla_decompress_all, ELEMENT_TYPE)(CompressedGorillaData *goril Assert(current_notnull_element == -1); } - else - { - /* - * The validity bitmap size is a multiple of 64 bits. Fill the tail bits - * with zeros, because the corresponding elements are not valid. - */ - if (n_total % 64) - { - const uint64 tail_mask = -1ULL >> (64 - n_total % 64); - validity_bitmap[n_total / 64] &= tail_mask; - -#ifdef USE_ASSERT_CHECKING - for (uint32 i = 0; i < 64; i++) - { - Assert(arrow_row_is_valid(validity_bitmap, (n_total / 64) * 64 + i) == - (i < n_total % 64)); - } -#endif - } - } /* Return the result. 
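 * To recap the tail masking above with a concrete example: for n_total = 70 * the bitmap occupies two 64-bit words, 70 % 64 = 6, and tail_mask = * -1ULL >> (64 - 6) keeps only the low 6 bits of the second word, so the 58 * padding rows read as invalid.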
*/ ArrowArray *result = MemoryContextAllocZero(dest_mctx, sizeof(ArrowArray) + sizeof(void *) * 2); diff --git a/tsl/src/init.c b/tsl/src/init.c index f43a7c2e487..5ef72970468 100644 --- a/tsl/src/init.c +++ b/tsl/src/init.c @@ -39,7 +39,7 @@ #include "nodes/decompress_chunk/planner.h" #include "nodes/skip_scan/skip_scan.h" #include "nodes/gapfill/gapfill_functions.h" -#include "partialize_agg.h" +#include "nodes/vector_agg/plan.h" #include "partialize_finalize.h" #include "planner.h" #include "process_utility.h" @@ -118,7 +118,7 @@ CrossModuleFunctions tsl_cm_functions = { .policies_show = policies_show, /* Vectorized queries */ - .push_down_aggregation = apply_vectorized_agg_optimization, + .tsl_postprocess_plan = tsl_postprocess_plan, /* Continuous Aggregates */ .partialize_agg = tsl_partialize_agg, @@ -194,6 +194,7 @@ ts_module_init(PG_FUNCTION_ARGS) _continuous_aggs_cache_inval_init(); _decompress_chunk_init(); _skip_scan_init(); + _vector_agg_init(); /* Register a cleanup function to be called when the backend exits */ if (register_proc_exit) on_proc_exit(ts_module_cleanup_on_pg_exit, 0); diff --git a/tsl/src/nodes/CMakeLists.txt b/tsl/src/nodes/CMakeLists.txt index e7789b8a314..d3eecdba8ad 100644 --- a/tsl/src/nodes/CMakeLists.txt +++ b/tsl/src/nodes/CMakeLists.txt @@ -5,3 +5,4 @@ add_subdirectory(decompress_chunk) add_subdirectory(frozen_chunk_dml) add_subdirectory(gapfill) add_subdirectory(skip_scan) +add_subdirectory(vector_agg) diff --git a/tsl/src/nodes/decompress_chunk/decompress_chunk.h b/tsl/src/nodes/decompress_chunk/decompress_chunk.h index a320fc23d11..96d335a84bd 100644 --- a/tsl/src/nodes/decompress_chunk/decompress_chunk.h +++ b/tsl/src/nodes/decompress_chunk/decompress_chunk.h @@ -55,58 +55,6 @@ typedef struct DecompressChunkPath { CustomPath custom_path; CompressionInfo *info; - /* - * decompression_map maps targetlist entries of the compressed scan to tuple - * attribute number of the uncompressed chunk. Negative values are special - * columns in the compressed scan that do not have a representation in the - * uncompressed chunk, but are still used for decompression. - */ - List *decompression_map; - - /* - * This Int list is parallel to the compressed scan targetlist, just like - * the above one. The value is true if a given targetlist entry is a - * segmentby column, false otherwise. Has the same length as the above list. - * We have to use the parallel lists and not a list of structs, because the - * Plans have to be copyable by the Postgres _copy functions, and we can't - * do that for a custom struct. - */ - List *is_segmentby_column; - - /* - * Same structure as above, says whether we support bulk decompression for this - * column. - */ - List *bulk_decompression_column; - - /* - * If we produce at least some columns that support bulk decompression. - */ - bool have_bulk_decompression_columns; - - /* - * Maps the uncompressed chunk attno to the respective column compression - * info. This lives only during planning so that we can understand on which - * columns we can apply vectorized quals. - */ - DecompressChunkColumnCompression *uncompressed_chunk_attno_to_compression_info; - - /* - * Are we able to execute a vectorized aggregation - */ - bool perform_vectorized_aggregation; - - /* - * Columns that are used for vectorized aggregates. The list contains for each attribute -1 if - * this is not an vectorized aggregate column or the Oid of the data type of the attribute. 
- * - * When creating vectorized aggregates, the decompression logic is not able to determine the - * type of the compressed column based on the output column since we emit partial aggregates - * for this attribute and the raw attribute is not found in the targetlist. So, build a map - * with the used data types here, which is used later to create the compression info - * properly. - */ - List *aggregated_column_type; List *required_compressed_pathkeys; bool needs_sequence_num; diff --git a/tsl/src/nodes/decompress_chunk/exec.c b/tsl/src/nodes/decompress_chunk/exec.c index e9bebc5bb06..f6959b65689 100644 --- a/tsl/src/nodes/decompress_chunk/exec.c +++ b/tsl/src/nodes/decompress_chunk/exec.c @@ -18,6 +18,8 @@ #include #include +#include + #include "compat/compat.h" #include "compression/array.h" #include "compression/arrow_c_data_interface.h" @@ -61,23 +63,25 @@ decompress_chunk_state_create(CustomScan *cscan) chunk_state->csstate.methods = &chunk_state->exec_methods; Assert(IsA(cscan->custom_private, List)); - Assert(list_length(cscan->custom_private) == 6); - List *settings = linitial(cscan->custom_private); - chunk_state->decompression_map = lsecond(cscan->custom_private); - chunk_state->is_segmentby_column = lthird(cscan->custom_private); - chunk_state->bulk_decompression_column = lfourth(cscan->custom_private); - chunk_state->aggregated_column_type = lfifth(cscan->custom_private); - chunk_state->sortinfo = lsixth(cscan->custom_private); + Assert(list_length(cscan->custom_private) == DCP_Count); + List *settings = list_nth(cscan->custom_private, DCP_Settings); + chunk_state->decompression_map = list_nth(cscan->custom_private, DCP_DecompressionMap); + chunk_state->is_segmentby_column = list_nth(cscan->custom_private, DCP_IsSegmentbyColumn); + chunk_state->bulk_decompression_column = + list_nth(cscan->custom_private, DCP_BulkDecompressionColumn); + chunk_state->sortinfo = list_nth(cscan->custom_private, DCP_SortInfo); + chunk_state->custom_scan_tlist = cscan->custom_scan_tlist; Assert(IsA(settings, IntList)); - Assert(list_length(settings) == 6); - chunk_state->hypertable_id = linitial_int(settings); - chunk_state->chunk_relid = lsecond_int(settings); - chunk_state->decompress_context.reverse = lthird_int(settings); - chunk_state->decompress_context.batch_sorted_merge = lfourth_int(settings); - chunk_state->decompress_context.enable_bulk_decompression = lfifth_int(settings); - chunk_state->perform_vectorized_aggregation = lsixth_int(settings); + Assert(list_length(settings) == DCS_Count); + chunk_state->hypertable_id = list_nth_int(settings, DCS_HypertableId); + chunk_state->chunk_relid = list_nth_int(settings, DCS_ChunkRelid); + chunk_state->decompress_context.reverse = list_nth_int(settings, DCS_Reverse); + chunk_state->decompress_context.batch_sorted_merge = + list_nth_int(settings, DCS_BatchSortedMerge); + chunk_state->decompress_context.enable_bulk_decompression = + list_nth_int(settings, DCS_EnableBulkDecompression); Assert(IsA(cscan->custom_exprs, List)); Assert(list_length(cscan->custom_exprs) == 1); @@ -85,14 +89,6 @@ decompress_chunk_state_create(CustomScan *cscan) Assert(list_length(chunk_state->decompression_map) == list_length(chunk_state->is_segmentby_column)); -#ifdef USE_ASSERT_CHECKING - if (chunk_state->perform_vectorized_aggregation) - { - Assert(list_length(chunk_state->decompression_map) == - list_length(chunk_state->aggregated_column_type)); - } -#endif - return (Node *) chunk_state; } @@ -291,22 +287,12 @@ decompress_chunk_begin(CustomScanState *node, EState 
*estate, int eflags) if (column.output_attno > 0) { - if (chunk_state->perform_vectorized_aggregation && - lfirst_int(list_nth_cell(chunk_state->aggregated_column_type, compressed_index)) != - -1) - { - column.typid = lfirst_int( - list_nth_cell(chunk_state->aggregated_column_type, compressed_index)); - } - else - { - /* normal column that is also present in decompressed chunk */ - Form_pg_attribute attribute = - TupleDescAttr(desc, AttrNumberGetAttrOffset(column.output_attno)); + /* normal column that is also present in decompressed chunk */ + Form_pg_attribute attribute = + TupleDescAttr(desc, AttrNumberGetAttrOffset(column.output_attno)); - column.typid = attribute->atttypid; - get_typlenbyval(column.typid, &column.value_bytes, &column.by_value); - } + column.typid = attribute->atttypid; + get_typlenbyval(column.typid, &column.value_bytes, &column.by_value); if (list_nth_int(chunk_state->is_segmentby_column, compressed_index)) column.type = SEGMENTBY_COLUMN; @@ -389,274 +375,6 @@ decompress_chunk_begin(CustomScanState *node, EState *estate, int eflags) detoaster_init(&dcontext->detoaster, CurrentMemoryContext); } -/* - * Perform a vectorized aggregation on int4 values - */ -static TupleTableSlot * -perform_vectorized_sum_int4(DecompressChunkState *chunk_state, Aggref *aggref) -{ - DecompressContext *dcontext = &chunk_state->decompress_context; - BatchQueue *batch_queue = chunk_state->batch_queue; - - Assert(chunk_state != NULL); - Assert(aggref != NULL); - - /* Partial result is a int8 */ - Assert(aggref->aggtranstype == INT8OID); - - /* Two columns are decompressed, the column that needs to be aggregated and the count column */ - Assert(dcontext->num_total_columns == 2); - - CompressionColumnDescription *value_column_description = &dcontext->template_columns[0]; - CompressionColumnDescription *count_column_description = &dcontext->template_columns[1]; - if (count_column_description->type != COUNT_COLUMN) - { - /* - * The count and value columns can go in different order based on their - * order in compressed chunk, so check which one we are seeing. - */ - CompressionColumnDescription *tmp = value_column_description; - value_column_description = count_column_description; - count_column_description = tmp; - } - Assert(value_column_description->type == COMPRESSED_COLUMN || - value_column_description->type == SEGMENTBY_COLUMN); - Assert(count_column_description->type == COUNT_COLUMN); - - /* Get a free batch slot */ - const int new_batch_index = batch_array_get_unused_slot(&batch_queue->batch_array); - - /* Nobody else should use batch states */ - Assert(new_batch_index == 0); - DecompressBatchState *batch_state = - batch_array_get_at(&batch_queue->batch_array, new_batch_index); - - /* Init per batch memory context */ - Assert(batch_state != NULL); - Assert(batch_state->per_batch_context == NULL); - batch_state->per_batch_context = create_per_batch_mctx(dcontext); - Assert(batch_state->per_batch_context != NULL); - - /* Init bulk decompression memory context */ - Assert(dcontext->bulk_decompression_context == NULL); - dcontext->bulk_decompression_context = create_bulk_decompression_mctx(CurrentMemoryContext); - Assert(dcontext->bulk_decompression_context != NULL); - - /* Get a reference the the output TupleTableSlot */ - TupleTableSlot *decompressed_scan_slot = chunk_state->csstate.ss.ss_ScanTupleSlot; - Assert(decompressed_scan_slot->tts_tupleDescriptor->natts == 1); - - /* Set all attributes of the result tuple to NULL. 
So, we return NULL if no data is processed - * by our implementation. In addition, the call marks the slot as being used (i.e., no - * ExecStoreVirtualTuple call is required). */ - ExecStoreAllNullTuple(decompressed_scan_slot); - Assert(!TupIsNull(decompressed_scan_slot)); - - int64 result_sum = 0; - - if (value_column_description->type == SEGMENTBY_COLUMN) - { - /* - * To calculate the sum for a segment by value, we need to multiply the value of the segment - * by column with the number of compressed tuples in this batch. - */ - while (true) - { - TupleTableSlot *compressed_slot = - ExecProcNode(linitial(chunk_state->csstate.custom_ps)); - - if (TupIsNull(compressed_slot)) - { - /* All segment by values are processed. */ - break; - } - - MemoryContext old_mctx = MemoryContextSwitchTo(batch_state->per_batch_context); - MemoryContextReset(batch_state->per_batch_context); - - bool isnull_value, isnull_elements; - Datum value = slot_getattr(compressed_slot, - value_column_description->compressed_scan_attno, - &isnull_value); - - /* We have multiple compressed tuples for this segment by value. Get number of - * compressed tuples */ - Datum elements = slot_getattr(compressed_slot, - count_column_description->compressed_scan_attno, - &isnull_elements); - - if (!isnull_value && !isnull_elements) - { - int32 intvalue = DatumGetInt32(value); - int32 amount = DatumGetInt32(elements); - int64 batch_sum = 0; - - Assert(amount > 0); - - /* We have at least one value */ - decompressed_scan_slot->tts_isnull[0] = false; - - /* Multiply the number of tuples with the actual value */ - if (unlikely(pg_mul_s64_overflow(intvalue, amount, &batch_sum))) - { - ereport(ERROR, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("bigint out of range"))); - } - - /* Add the value to our sum */ - if (unlikely(pg_add_s64_overflow(result_sum, batch_sum, ((int64 *) &result_sum)))) - ereport(ERROR, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("bigint out of range"))); - } - MemoryContextSwitchTo(old_mctx); - } - } - else if (value_column_description->type == COMPRESSED_COLUMN) - { - Assert(dcontext->enable_bulk_decompression); - Assert(value_column_description->bulk_decompression_supported); - Assert(list_length(aggref->args) == 1); - - while (true) - { - TupleTableSlot *compressed_slot = - ExecProcNode(linitial(chunk_state->csstate.custom_ps)); - if (TupIsNull(compressed_slot)) - { - /* All compressed batches are processed. 
*/ - break; - } - - MemoryContext old_mctx = MemoryContextSwitchTo(batch_state->per_batch_context); - MemoryContextReset(batch_state->per_batch_context); - - /* Decompress data */ - bool isnull; - Datum value = slot_getattr(compressed_slot, - value_column_description->compressed_scan_attno, - &isnull); - - Ensure(isnull == false, "got unexpected NULL attribute value from compressed batch"); - - /* We have at least one value */ - decompressed_scan_slot->tts_isnull[0] = false; - - CompressedDataHeader *header = (CompressedDataHeader *) - detoaster_detoast_attr_copy((struct varlena *) DatumGetPointer(value), - &dcontext->detoaster, - CurrentMemoryContext); - - ArrowArray *arrow = NULL; - - DecompressAllFunction decompress_all = - tsl_get_decompress_all_function(header->compression_algorithm, - value_column_description->typid); - Assert(decompress_all != NULL); - - MemoryContextSwitchTo(dcontext->bulk_decompression_context); - - arrow = decompress_all(PointerGetDatum(header), - value_column_description->typid, - batch_state->per_batch_context); - - Assert(arrow != NULL); - - MemoryContextReset(dcontext->bulk_decompression_context); - MemoryContextSwitchTo(batch_state->per_batch_context); - - /* - * We accumulate the sum as int64, so we can sum INT_MAX = 2^31 - 1 - * at least 2^31 times without incurrint an overflow of the int64 - * accumulator. The same is true for negative numbers. The - * compressed batch size is currently capped at 1000 rows, but even - * if it's changed in the future, it's unlikely that we support - * batches larger than 65536 rows, not to mention 2^31. Therefore, - * we don't need to check for overflows within the loop, which would - * slow down the calculation. - */ - Assert(arrow->length <= INT_MAX); - - int64 batch_sum = 0; - for (int i = 0; i < arrow->length; i++) - { - const bool arrow_isnull = !arrow_row_is_valid(arrow->buffers[0], i); - - if (likely(!arrow_isnull)) - { - const int32 arrow_value = ((int32 *) arrow->buffers[1])[i]; - batch_sum += arrow_value; - } - } - - if (unlikely(pg_add_s64_overflow(result_sum, batch_sum, ((int64 *) &result_sum)))) - ereport(ERROR, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("bigint out of range"))); - MemoryContextSwitchTo(old_mctx); - } - } - else - { - elog(ERROR, "unsupported column type"); - } - - /* Use Int64GetDatum to store the result since a 64-bit value is not pass-by-value on 32-bit - * systems */ - decompressed_scan_slot->tts_values[0] = Int64GetDatum(result_sum); - - return decompressed_scan_slot; -} - -/* - * Directly execute an aggregation function on decompressed data and emit a partial aggregate - * result. - * - * Executing the aggregation directly in this node makes it possible to use the columnar data - * directly before it is converted into row-based tuples. - */ -static TupleTableSlot * -perform_vectorized_aggregation(DecompressChunkState *chunk_state) -{ - BatchQueue *bq = chunk_state->batch_queue; - - Assert(list_length(chunk_state->custom_scan_tlist) == 1); - - /* Checked by planner */ - Assert(ts_guc_enable_vectorized_aggregation); - Assert(ts_guc_enable_bulk_decompression); - - /* When using vectorized aggregates, only one result tuple is produced. So, if we have - * already initialized a batch state, the aggregation was already performed. 
- */ - if (batch_array_has_active_batches(&bq->batch_array)) - { - ExecClearTuple(chunk_state->csstate.ss.ss_ScanTupleSlot); - return chunk_state->csstate.ss.ss_ScanTupleSlot; - } - - /* Determine which kind of vectorized aggregation we should perform */ - TargetEntry *tlentry = (TargetEntry *) linitial(chunk_state->custom_scan_tlist); - Assert(IsA(tlentry->expr, Aggref)); - Aggref *aggref = castNode(Aggref, tlentry->expr); - - /* The aggregate should be a partial aggregate */ - Assert(aggref->aggsplit == AGGSPLIT_INITIAL_SERIAL); - - switch (aggref->aggfnoid) - { - case F_SUM_INT4: - return perform_vectorized_sum_int4(chunk_state, aggref); - default: - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("vectorized aggregation for function %d is not supported", - aggref->aggfnoid))); - pg_unreachable(); - } -} - /* * The exec function for the DecompressChunk node. It takes the explicit queue * functions pointer as an optimization, to allow these functions to be @@ -671,11 +389,6 @@ decompress_chunk_exec_impl(DecompressChunkState *chunk_state, const BatchQueueFu Assert(bq->funcs == bqfuncs); - if (chunk_state->perform_vectorized_aggregation) - { - return perform_vectorized_aggregation(chunk_state); - } - bqfuncs->pop(bq, dcontext); while (bqfuncs->needs_next_batch(bq)) @@ -780,12 +493,5 @@ decompress_chunk_explain(CustomScanState *node, List *ancestors, ExplainState *e chunk_state->decompress_context.enable_bulk_decompression, es); } - - if (chunk_state->perform_vectorized_aggregation) - { - ExplainPropertyBool("Vectorized Aggregation", - chunk_state->perform_vectorized_aggregation, - es); - } } } diff --git a/tsl/src/nodes/decompress_chunk/exec.h b/tsl/src/nodes/decompress_chunk/exec.h index b3a43640e50..172672bedcc 100644 --- a/tsl/src/nodes/decompress_chunk/exec.h +++ b/tsl/src/nodes/decompress_chunk/exec.h @@ -20,7 +20,6 @@ typedef struct DecompressChunkState List *decompression_map; List *is_segmentby_column; List *bulk_decompression_column; - List *aggregated_column_type; List *custom_scan_tlist; DecompressContext decompress_context; @@ -33,10 +32,6 @@ typedef struct DecompressChunkState List *sortinfo; - /* Perform calculation of the aggregate directly in the decompress chunk node and emit partials - */ - bool perform_vectorized_aggregation; - /* * For some predicates, we have more efficient implementation that work on * the entire compressed batch in one go. They go to this list, and the rest @@ -49,3 +44,6 @@ typedef struct DecompressChunkState } DecompressChunkState; extern Node *decompress_chunk_state_create(CustomScan *cscan); + +TupleTableSlot *decompress_chunk_exec_vector_agg_impl(CustomScanState *vector_agg_state, + DecompressChunkState *decompress_state); diff --git a/tsl/src/nodes/decompress_chunk/planner.c b/tsl/src/nodes/decompress_chunk/planner.c index 492cb280b75..01330f68171 100644 --- a/tsl/src/nodes/decompress_chunk/planner.c +++ b/tsl/src/nodes/decompress_chunk/planner.c @@ -29,10 +29,12 @@ #include "custom_type_cache.h" #include "guc.h" #include "import/planner.h" +#include "import/list.h" #include "nodes/decompress_chunk/decompress_chunk.h" #include "nodes/decompress_chunk/exec.h" #include "nodes/decompress_chunk/planner.h" #include "nodes/chunk_append/transform.h" +#include "nodes/vector_agg/exec.h" #include "vector_predicates.h" #include "ts_catalog/array_utils.h" @@ -62,18 +64,61 @@ check_for_system_columns(Bitmapset *attrs_used) } } +/* + * Scratch space for mapping out the decompressed columns. 
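+ * It is filled in by build_decompression_map() and lives only for the + * duration of planning; decompress_chunk_plan_create() then copies the lists + * into the plan's custom_private under the DCP_* indexes.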
+ */ +typedef struct +{ + /* + * decompression_map maps targetlist entries of the compressed scan to tuple + * attribute number of the uncompressed chunk. Negative values are special + * columns in the compressed scan that do not have a representation in the + * uncompressed chunk, but are still used for decompression. + */ + List *decompression_map; + + /* + * This Int list is parallel to the compressed scan targetlist, just like + * the above one. The value is true if a given targetlist entry is a + * segmentby column, false otherwise. Has the same length as the above list. + * We have to use the parallel lists and not a list of structs, because the + * Plans have to be copyable by the Postgres _copy functions, and we can't + * do that for a custom struct. + */ + List *is_segmentby_column; + + /* + * Same structure as above, says whether we support bulk decompression for this + * column. + */ + List *bulk_decompression_column; + + /* + * If we produce at least some columns that support bulk decompression. + */ + bool have_bulk_decompression_columns; + + /* + * Maps the uncompressed chunk attno to the respective column compression + * info. This lives only during planning so that we can understand on which + * columns we can apply vectorized quals. + */ + DecompressChunkColumnCompression *uncompressed_chunk_attno_to_compression_info; +} DecompressionMapContext; + /* * Given the scan targetlist and the bitmapset of the needed columns, determine * which scan columns become which decompressed columns (fill decompression_map). * - * Note that the chunk_attrs_needed bitmap is offset by the + * Note that the uncompressed_attrs_needed bitmap is offset by the * FirstLowInvalidHeapAttributeNumber, similar to RelOptInfo.attr_needed. This * allows to encode the requirement for system columns, which have negative * attnos. */ static void -build_decompression_map(PlannerInfo *root, DecompressChunkPath *path, List *scan_tlist, - Bitmapset *chunk_attrs_needed) +build_decompression_map(PlannerInfo *root, DecompressionMapContext *context, + DecompressChunkPath *path, List *compressed_scan_tlist, + Bitmapset *uncompressed_attrs_needed) { /* * Track which normal and metadata columns we were able to find in the @@ -81,7 +126,8 @@ build_decompression_map(PlannerInfo *root, DecompressChunkPath *path, List *scan */ bool missing_count = true; bool missing_sequence = path->needs_sequence_num; - Bitmapset *chunk_attrs_found = NULL, *selectedCols = NULL; + Bitmapset *uncompressed_attrs_found = NULL; + Bitmapset *selectedCols = NULL; #if PG16_LT selectedCols = path->info->ht_rte->selectedCols; @@ -106,26 +152,26 @@ build_decompression_map(PlannerInfo *root, DecompressChunkPath *path, List *scan * be added at decompression time. Always mark it as found. 
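 * (tableoid is a system column that the executor synthesizes from the * relation being scanned, so no compressed column has to supply it.)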
*/ if (bms_is_member(TableOidAttributeNumber - FirstLowInvalidHeapAttributeNumber, - chunk_attrs_needed)) + uncompressed_attrs_needed)) { - chunk_attrs_found = - bms_add_member(chunk_attrs_found, + uncompressed_attrs_found = + bms_add_member(uncompressed_attrs_found, TableOidAttributeNumber - FirstLowInvalidHeapAttributeNumber); } ListCell *lc; - path->uncompressed_chunk_attno_to_compression_info = - palloc0(sizeof(*path->uncompressed_chunk_attno_to_compression_info) * + context->uncompressed_chunk_attno_to_compression_info = + palloc0(sizeof(*context->uncompressed_chunk_attno_to_compression_info) * (path->info->chunk_rel->max_attr + 1)); /* * Go over the scan targetlist and determine to which output column each * scan column goes, saving other additional info as we do that. */ - path->have_bulk_decompression_columns = false; - path->decompression_map = NIL; - foreach (lc, scan_tlist) + context->have_bulk_decompression_columns = false; + context->decompression_map = NIL; + foreach (lc, compressed_scan_tlist) { TargetEntry *target = (TargetEntry *) lfirst(lc); if (!IsA(target->expr, Var)) @@ -151,33 +197,33 @@ build_decompression_map(PlannerInfo *root, DecompressChunkPath *path, List *scan const char *column_name = get_attname(path->info->compressed_rte->relid, compressed_attno, /* missing_ok = */ false); - AttrNumber chunk_attno = get_attnum(path->info->chunk_rte->relid, column_name); + AttrNumber uncompressed_attno = get_attnum(path->info->chunk_rte->relid, column_name); - AttrNumber destination_attno_in_uncompressed_chunk = 0; - if (chunk_attno != InvalidAttrNumber) + AttrNumber destination_attno = 0; + if (uncompressed_attno != InvalidAttrNumber) { /* * Normal column, not a metadata column. */ - Assert(chunk_attno != InvalidAttrNumber); + Assert(uncompressed_attno != InvalidAttrNumber); - if (bms_is_member(0 - FirstLowInvalidHeapAttributeNumber, chunk_attrs_needed)) + if (bms_is_member(0 - FirstLowInvalidHeapAttributeNumber, uncompressed_attrs_needed)) { /* * attno = 0 means whole-row var. Output all the columns. 
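 * (A whole-row var arises from queries such as SELECT my_chunk FROM my_chunk * or from row-valued expressions like row_to_json(my_chunk).)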
*/ - destination_attno_in_uncompressed_chunk = chunk_attno; - chunk_attrs_found = - bms_add_member(chunk_attrs_found, - chunk_attno - FirstLowInvalidHeapAttributeNumber); + destination_attno = uncompressed_attno; + uncompressed_attrs_found = + bms_add_member(uncompressed_attrs_found, + uncompressed_attno - FirstLowInvalidHeapAttributeNumber); } - else if (bms_is_member(chunk_attno - FirstLowInvalidHeapAttributeNumber, - chunk_attrs_needed)) + else if (bms_is_member(uncompressed_attno - FirstLowInvalidHeapAttributeNumber, + uncompressed_attrs_needed)) { - destination_attno_in_uncompressed_chunk = chunk_attno; - chunk_attrs_found = - bms_add_member(chunk_attrs_found, - chunk_attno - FirstLowInvalidHeapAttributeNumber); + destination_attno = uncompressed_attno; + uncompressed_attrs_found = + bms_add_member(uncompressed_attrs_found, + uncompressed_attno - FirstLowInvalidHeapAttributeNumber); } } else @@ -197,58 +243,44 @@ build_decompression_map(PlannerInfo *root, DecompressChunkPath *path, List *scan if (strcmp(column_name, COMPRESSION_COLUMN_METADATA_COUNT_NAME) == 0) { - destination_attno_in_uncompressed_chunk = DECOMPRESS_CHUNK_COUNT_ID; + destination_attno = DECOMPRESS_CHUNK_COUNT_ID; missing_count = false; } else if (path->needs_sequence_num && strcmp(column_name, COMPRESSION_COLUMN_METADATA_SEQUENCE_NUM_NAME) == 0) { - destination_attno_in_uncompressed_chunk = DECOMPRESS_CHUNK_SEQUENCE_NUM_ID; + destination_attno = DECOMPRESS_CHUNK_SEQUENCE_NUM_ID; missing_sequence = false; } } bool is_segment = ts_array_is_member(path->info->settings->fd.segmentby, column_name); - path->decompression_map = - lappend_int(path->decompression_map, destination_attno_in_uncompressed_chunk); - path->is_segmentby_column = lappend_int(path->is_segmentby_column, is_segment); + context->decompression_map = lappend_int(context->decompression_map, destination_attno); + context->is_segmentby_column = lappend_int(context->is_segmentby_column, is_segment); /* * Determine if we can use bulk decompression for this column. */ - Oid typoid = get_atttype(path->info->chunk_rte->relid, chunk_attno); + Oid typoid = get_atttype(path->info->chunk_rte->relid, uncompressed_attno); const bool bulk_decompression_possible = - !is_segment && destination_attno_in_uncompressed_chunk > 0 && + !is_segment && destination_attno > 0 && tsl_get_decompress_all_function(compression_get_default_algorithm(typoid), typoid) != NULL; - path->have_bulk_decompression_columns |= bulk_decompression_possible; - path->bulk_decompression_column = - lappend_int(path->bulk_decompression_column, bulk_decompression_possible); + context->have_bulk_decompression_columns |= bulk_decompression_possible; + context->bulk_decompression_column = + lappend_int(context->bulk_decompression_column, bulk_decompression_possible); /* * Save information about decompressed columns in uncompressed chunk * for planning of vectorized filters. 
*/ - if (destination_attno_in_uncompressed_chunk > 0) + if (destination_attno > 0) { - path->uncompressed_chunk_attno_to_compression_info - [destination_attno_in_uncompressed_chunk] = + context->uncompressed_chunk_attno_to_compression_info[destination_attno] = (DecompressChunkColumnCompression){ .bulk_decompression_possible = bulk_decompression_possible }; } - - if (path->perform_vectorized_aggregation) - { - Assert(list_length(path->custom_path.path.parent->reltarget->exprs) == 1); - Var *var = linitial(path->custom_path.path.parent->reltarget->exprs); - Assert((Index) var->varno == path->custom_path.path.parent->relid); - if (var->varattno == destination_attno_in_uncompressed_chunk) - path->aggregated_column_type = - lappend_int(path->aggregated_column_type, var->vartype); - else - path->aggregated_column_type = lappend_int(path->aggregated_column_type, -1); - } } /* @@ -256,7 +288,8 @@ build_decompression_map(PlannerInfo *root, DecompressChunkPath *path, List *scan * We can't conveniently check that we have all columns for all-row vars, so * skip attno 0 in this check. */ - Bitmapset *attrs_not_found = bms_difference(chunk_attrs_needed, chunk_attrs_found); + Bitmapset *attrs_not_found = + bms_difference(uncompressed_attrs_needed, uncompressed_attrs_found); int bit = bms_next_member(attrs_not_found, 0 - FirstLowInvalidHeapAttributeNumber); if (bit >= 0) { @@ -428,7 +461,7 @@ is_not_runtime_constant(Node *node) * commuted copy. If not, return NULL. */ static Node * -make_vectorized_qual(DecompressChunkPath *path, Node *qual) +make_vectorized_qual(DecompressionMapContext *context, DecompressChunkPath *path, Node *qual) { /* * We can vectorize BoolExpr (AND/OR/NOT). @@ -452,7 +485,7 @@ make_vectorized_qual(DecompressChunkPath *path, Node *qual) foreach (lc, boolexpr->args) { Node *arg = lfirst(lc); - Node *vectorized_arg = make_vectorized_qual(path, arg); + Node *vectorized_arg = make_vectorized_qual(context, path, arg); if (vectorized_arg == NULL) { return NULL; @@ -569,7 +602,7 @@ make_vectorized_qual(DecompressChunkPath *path, Node *qual) * ExecQual is performed before ExecProject and operates on the decompressed * scan slot, so the qual attnos are the uncompressed chunk attnos. */ - if (!path->uncompressed_chunk_attno_to_compression_info[var->varattno] + if (!context->uncompressed_chunk_attno_to_compression_info[var->varattno] .bulk_decompression_possible) { /* This column doesn't support bulk decompression. */ @@ -645,8 +678,8 @@ make_vectorized_qual(DecompressChunkPath *path, Node *qual) * list. 
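 * For example, given WHERE x > 10 AND some_udf(y), the comparison x > 10 can * become a vectorized qual when column x supports bulk decompression, while * the function call goes to the nonvectorized list.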
*/ static void -find_vectorized_quals(DecompressChunkPath *path, List *qual_list, List **vectorized, - List **nonvectorized) +find_vectorized_quals(DecompressionMapContext *context, DecompressChunkPath *path, List *qual_list, + List **vectorized, List **nonvectorized) { ListCell *lc; foreach (lc, qual_list) @@ -662,7 +695,7 @@ find_vectorized_quals(DecompressChunkPath *path, List *qual_list, List **vectori Node *transformed_comparison = (Node *) ts_transform_cross_datatype_comparison((Expr *) source_qual); - Node *vectorized_qual = make_vectorized_qual(path, transformed_comparison); + Node *vectorized_qual = make_vectorized_qual(context, path, transformed_comparison); if (vectorized_qual) { *vectorized = lappend(*vectorized, vectorized_qual); @@ -716,7 +749,7 @@ ts_label_sort_with_costsize(PlannerInfo *root, Sort *plan, double limit_tuples) Plan * decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *path, - List *decompressed_tlist, List *clauses, List *custom_plans) + List *output_targetlist, List *clauses, List *custom_plans) { DecompressChunkPath *dcpath = (DecompressChunkPath *) path; CustomScan *decompress_plan = makeNode(CustomScan); @@ -733,19 +766,7 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat decompress_plan->scan.scanrelid = dcpath->info->chunk_rel->relid; /* output target list */ - decompress_plan->scan.plan.targetlist = decompressed_tlist; - /* input target list */ - decompress_plan->custom_scan_tlist = NIL; - - /* Make PostgreSQL aware that we emit partials. In apply_vectorized_agg_optimization the - * pathtarget of the node is changed; the decompress chunk node now emits prtials directly. - * - * We have to set a custom_scan_tlist to make sure tlist_matches_tupdesc is true to prevent the - * call of ExecAssignProjectionInfo in ExecConditionalAssignProjectionInfo. Otherwise, - * PostgreSQL will error out since scan nodes are not intended to emit partial aggregates. - */ - if (dcpath->perform_vectorized_aggregation) - decompress_plan->custom_scan_tlist = decompressed_tlist; + decompress_plan->scan.plan.targetlist = output_targetlist; if (IsA(compressed_path, IndexPath)) { @@ -827,25 +848,30 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat /* * Determine which columns we have to decompress. - * decompressed_tlist is sometimes empty, e.g. for a direct select from + * output_targetlist is sometimes empty, e.g. for a direct select from * chunk. We have a ProjectionPath above DecompressChunk in this case, and * the targetlist for this path is not built by the planner * (CP_IGNORE_TLIST). This is why we have to examine rel pathtarget. * Looking at the targetlist is not enough, we also have to decompress the * columns participating in quals and in pathkeys. */ - Bitmapset *chunk_attrs_needed = NULL; + Bitmapset *uncompressed_attrs_needed = NULL; pull_varattnos((Node *) decompress_plan->scan.plan.qual, dcpath->info->chunk_rel->relid, - &chunk_attrs_needed); + &uncompressed_attrs_needed); pull_varattnos((Node *) dcpath->custom_path.path.pathtarget->exprs, dcpath->info->chunk_rel->relid, - &chunk_attrs_needed); + &uncompressed_attrs_needed); /* * Determine which compressed column goes to which output column. 
*/ - build_decompression_map(root, dcpath, compressed_scan->plan.targetlist, chunk_attrs_needed); + DecompressionMapContext context = { 0 }; + build_decompression_map(root, + &context, + dcpath, + compressed_scan->plan.targetlist, + uncompressed_attrs_needed); /* Build heap sort info for sorted_merge_append */ List *sort_options = NIL; @@ -1038,7 +1064,7 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat const bool enable_bulk_decompression = !dcpath->batch_sorted_merge && ts_guc_enable_bulk_decompression && - dcpath->have_bulk_decompression_columns; + context.have_bulk_decompression_columns; /* * For some predicates, we have more efficient implementation that work on @@ -1049,7 +1075,8 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat if (enable_bulk_decompression) { List *nonvectorized_quals = NIL; - find_vectorized_quals(dcpath, + find_vectorized_quals(&context, + dcpath, decompress_plan->scan.plan.qual, &vectorized_quals, &nonvectorized_quals); @@ -1075,12 +1102,12 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat } #endif - settings = list_make6_int(dcpath->info->hypertable_id, - dcpath->info->chunk_rte->relid, - dcpath->reverse, - dcpath->batch_sorted_merge, - enable_bulk_decompression, - dcpath->perform_vectorized_aggregation); + settings = ts_new_list(T_IntList, DCS_Count); + lfirst_int(list_nth_cell(settings, DCS_HypertableId)) = dcpath->info->hypertable_id; + lfirst_int(list_nth_cell(settings, DCS_ChunkRelid)) = dcpath->info->chunk_rte->relid; + lfirst_int(list_nth_cell(settings, DCS_Reverse)) = dcpath->reverse; + lfirst_int(list_nth_cell(settings, DCS_BatchSortedMerge)) = dcpath->batch_sorted_merge; + lfirst_int(list_nth_cell(settings, DCS_EnableBulkDecompression)) = enable_bulk_decompression; /* * Vectorized quals must go into custom_exprs, because Postgres has to see @@ -1089,12 +1116,22 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat */ decompress_plan->custom_exprs = list_make1(vectorized_quals); - decompress_plan->custom_private = list_make6(settings, - dcpath->decompression_map, - dcpath->is_segmentby_column, - dcpath->bulk_decompression_column, - dcpath->aggregated_column_type, - sort_options); + decompress_plan->custom_private = ts_new_list(T_List, DCP_Count); + lfirst(list_nth_cell(decompress_plan->custom_private, DCP_Settings)) = settings; + lfirst(list_nth_cell(decompress_plan->custom_private, DCP_DecompressionMap)) = + context.decompression_map; + lfirst(list_nth_cell(decompress_plan->custom_private, DCP_IsSegmentbyColumn)) = + context.is_segmentby_column; + lfirst(list_nth_cell(decompress_plan->custom_private, DCP_BulkDecompressionColumn)) = + context.bulk_decompression_column; + lfirst(list_nth_cell(decompress_plan->custom_private, DCP_SortInfo)) = sort_options; + + /* + * Note that our scan tuple type is uncompressed chunk tuple. This is the + * assumption of decompression map and generally of all decompression + * functions. 
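+ * In Postgres terms, leaving custom_scan_tlist empty (NIL) below means the + * scan tuple descriptor comes from the underlying chunk relation rather than + * from a custom targetlist.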
+ */ + decompress_plan->custom_scan_tlist = NIL; return &decompress_plan->scan.plan; } diff --git a/tsl/src/nodes/decompress_chunk/planner.h b/tsl/src/nodes/decompress_chunk/planner.h index 95098213bdb..11231c873b2 100644 --- a/tsl/src/nodes/decompress_chunk/planner.h +++ b/tsl/src/nodes/decompress_chunk/planner.h @@ -7,7 +7,28 @@ #include +typedef enum +{ + DCS_HypertableId = 0, + DCS_ChunkRelid = 1, + DCS_Reverse = 2, + DCS_BatchSortedMerge = 3, + DCS_EnableBulkDecompression = 4, + DCS_Count +} DecompressChunkSettingsIndex; + +typedef enum +{ + DCP_Settings = 0, + DCP_DecompressionMap = 1, + DCP_IsSegmentbyColumn = 2, + DCP_BulkDecompressionColumn = 3, + DCP_SortInfo = 4, + DCP_Count +} DecompressChunkPrivateIndex; + extern Plan *decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *path, - List *tlist, List *clauses, List *custom_plans); + List *output_targetlist, List *clauses, + List *custom_plans); extern void _decompress_chunk_init(void); diff --git a/tsl/src/nodes/vector_agg/CMakeLists.txt b/tsl/src/nodes/vector_agg/CMakeLists.txt new file mode 100644 index 00000000000..428e589d92e --- /dev/null +++ b/tsl/src/nodes/vector_agg/CMakeLists.txt @@ -0,0 +1,4 @@ +set(SOURCES + ${CMAKE_CURRENT_SOURCE_DIR}/exec.c ${CMAKE_CURRENT_SOURCE_DIR}/functions.c + ${CMAKE_CURRENT_SOURCE_DIR}/plan.c) +target_sources(${TSL_LIBRARY_NAME} PRIVATE ${SOURCES}) diff --git a/tsl/src/nodes/vector_agg/exec.c b/tsl/src/nodes/vector_agg/exec.c new file mode 100644 index 00000000000..a212826caad --- /dev/null +++ b/tsl/src/nodes/vector_agg/exec.c @@ -0,0 +1,201 @@ +/* + * This file and its contents are licensed under the Timescale License. + * Please see the included NOTICE for copyright information and + * LICENSE-TIMESCALE for a copy of the license. + */ + +#include + +#include +#include +#include +#include +#include + +#include "exec.h" + +#include "compression/arrow_c_data_interface.h" +#include "functions.h" +#include "guc.h" +#include "nodes/decompress_chunk/compressed_batch.h" +#include "nodes/decompress_chunk/exec.h" +#include "nodes/vector_agg.h" + +static void +vector_agg_begin(CustomScanState *node, EState *estate, int eflags) +{ + CustomScan *cscan = castNode(CustomScan, node->ss.ps.plan); + node->custom_ps = + lappend(node->custom_ps, ExecInitNode(linitial(cscan->custom_plans), estate, eflags)); +} + +static void +vector_agg_end(CustomScanState *node) +{ + ExecEndNode(linitial(node->custom_ps)); +} + +static void +vector_agg_rescan(CustomScanState *node) +{ + if (node->ss.ps.chgParam != NULL) + UpdateChangedParamSet(linitial(node->custom_ps), node->ss.ps.chgParam); + + ExecReScan(linitial(node->custom_ps)); +} + +static TupleTableSlot * +vector_agg_exec(CustomScanState *vector_agg_state) +{ + DecompressChunkState *decompress_state = + (DecompressChunkState *) linitial(vector_agg_state->custom_ps); + + /* + * The aggregated targetlist with Aggrefs is in the custom scan targetlist + * of the custom scan node that is performing the vectorized aggregation. + * We do this to avoid projections at this node, because the postgres + * projection functions complain when they see an Aggref in a custom + * node output targetlist. + * The output targetlist, in turn, consists of just the INDEX_VAR references + * into the custom_scan_tlist. 
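+ * For example, for a partial sum(int_value) the custom_scan_tlist has the
+ * single entry Aggref(sum, Var(int_value)), and the output targetlist has
+ * the single entry Var(INDEX_VAR, varattno = 1) referencing it (built by
+ * build_trivial_custom_output_targetlist() in plan.c).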
+ */
+ List *aggregated_tlist = castNode(CustomScan, vector_agg_state->ss.ps.plan)->custom_scan_tlist;
+ Assert(list_length(aggregated_tlist) == 1);
+
+ /* Checked by planner */
+ Assert(ts_guc_enable_vectorized_aggregation);
+ Assert(ts_guc_enable_bulk_decompression);
+
+ /* Determine which kind of vectorized aggregation we should perform */
+ TargetEntry *tlentry = (TargetEntry *) linitial(aggregated_tlist);
+ Assert(IsA(tlentry->expr, Aggref));
+ Aggref *aggref = castNode(Aggref, tlentry->expr);
+
+ Assert(list_length(aggref->args) == 1);
+
+ /* The aggregate should be a partial aggregate */
+ Assert(aggref->aggsplit == AGGSPLIT_INITIAL_SERIAL);
+
+ Var *var = castNode(Var, castNode(TargetEntry, linitial(aggref->args))->expr);
+
+ DecompressContext *dcontext = &decompress_state->decompress_context;
+
+ CompressionColumnDescription *value_column_description = NULL;
+ for (int i = 0; i < dcontext->num_total_columns; i++)
+ {
+ CompressionColumnDescription *current_column = &dcontext->template_columns[i];
+ if (current_column->output_attno == var->varattno)
+ {
+ value_column_description = current_column;
+ break;
+ }
+ }
+ Ensure(value_column_description != NULL, "aggregated compressed column not found");
+
+ Assert(value_column_description->type == COMPRESSED_COLUMN ||
+ value_column_description->type == SEGMENTBY_COLUMN);
+
+ BatchQueue *batch_queue = decompress_state->batch_queue;
+ DecompressBatchState *batch_state = batch_array_get_at(&batch_queue->batch_array, 0);
+
+ /* Get a reference to the output TupleTableSlot */
+ TupleTableSlot *aggregated_slot = vector_agg_state->ss.ps.ps_ResultTupleSlot;
+ Assert(aggregated_slot->tts_tupleDescriptor->natts == 1);
+
+ VectorAggregate *agg = get_vector_aggregate(aggref->aggfnoid);
+ Assert(agg != NULL);
+
+ agg->agg_init(&aggregated_slot->tts_values[0], &aggregated_slot->tts_isnull[0]);
+ ExecClearTuple(aggregated_slot);
+
+ /*
+ * Have to skip the batches that are fully filtered out. This condition also
+ * handles the batch that was consumed on the previous step.
+ */
+ while (batch_state->next_batch_row >= batch_state->total_batch_rows)
+ {
+ TupleTableSlot *compressed_slot =
+ ExecProcNode(linitial(decompress_state->csstate.custom_ps));
+
+ if (TupIsNull(compressed_slot))
+ {
+ /* All values are processed. */
+ return NULL;
+ }
+
+ compressed_batch_set_compressed_tuple(dcontext, batch_state, compressed_slot);
+ }
+
+ ArrowArray *arrow = NULL;
+ if (value_column_description->type == COMPRESSED_COLUMN)
+ {
+ Assert(dcontext->enable_bulk_decompression);
+ Assert(value_column_description->bulk_decompression_supported);
+ CompressedColumnValues *values =
+ &batch_state->compressed_columns[value_column_description - dcontext->template_columns];
+ Assert(values->decompression_type != DT_Invalid);
+ arrow = values->arrow;
+ }
+ else
+ {
+ Assert(value_column_description->type == SEGMENTBY_COLUMN);
+ }
+
+ if (arrow == NULL)
+ {
+ /*
+ * To calculate the sum for a segment by value or default compressed
+ * column value, we need to multiply this value by the number of
+ * passing decompressed tuples in this batch.
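+ * For example, a batch whose segmentby value is 7, of which 600 out of
+ * 1000 rows pass the vectorized filter, contributes 7 * 600 = 4200 to
+ * the sum.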
+ */ + int n = batch_state->total_batch_rows; + if (batch_state->vector_qual_result) + { + n = arrow_num_valid(batch_state->vector_qual_result, n); + Assert(n > 0); + } + + int offs = AttrNumberGetAttrOffset(value_column_description->output_attno); + agg->agg_const(batch_state->decompressed_scan_slot_data.base.tts_values[offs], + batch_state->decompressed_scan_slot_data.base.tts_isnull[offs], + n, + &aggregated_slot->tts_values[0], + &aggregated_slot->tts_isnull[0]); + } + else + { + agg->agg_vector(arrow, + batch_state->vector_qual_result, + &aggregated_slot->tts_values[0], + &aggregated_slot->tts_isnull[0]); + } + + compressed_batch_discard_tuples(batch_state); + + ExecStoreVirtualTuple(aggregated_slot); + + return aggregated_slot; +} + +static void +vector_agg_explain(CustomScanState *node, List *ancestors, ExplainState *es) +{ + /* No additional output is needed. */ +} + +static struct CustomExecMethods exec_methods = { + .CustomName = VECTOR_AGG_NODE_NAME, + .BeginCustomScan = vector_agg_begin, + .ExecCustomScan = vector_agg_exec, + .EndCustomScan = vector_agg_end, + .ReScanCustomScan = vector_agg_rescan, + .ExplainCustomScan = vector_agg_explain, +}; + +Node * +vector_agg_state_create(CustomScan *cscan) +{ + CustomScanState *state = makeNode(CustomScanState); + state->methods = &exec_methods; + return (Node *) state; +} diff --git a/tsl/src/partialize_agg.h b/tsl/src/nodes/vector_agg/exec.h similarity index 52% rename from tsl/src/partialize_agg.h rename to tsl/src/nodes/vector_agg/exec.h index b7fe1ba1d02..fb64dca6512 100644 --- a/tsl/src/partialize_agg.h +++ b/tsl/src/nodes/vector_agg/exec.h @@ -3,7 +3,16 @@ * Please see the included NOTICE for copyright information and * LICENSE-TIMESCALE for a copy of the license. */ + #pragma once -extern bool apply_vectorized_agg_optimization(PlannerInfo *root, AggPath *aggregation_path, - Path *subpath); +#include + +#include + +typedef struct VectorAggState +{ + CustomScanState custom; +} VectorAggState; + +extern Node *vector_agg_state_create(CustomScan *cscan); diff --git a/tsl/src/nodes/vector_agg/functions.c b/tsl/src/nodes/vector_agg/functions.c new file mode 100644 index 00000000000..fa9a185c25b --- /dev/null +++ b/tsl/src/nodes/vector_agg/functions.c @@ -0,0 +1,139 @@ +/* + * This file and its contents are licensed under the Timescale License. + * Please see the included NOTICE for copyright information and + * LICENSE-TIMESCALE for a copy of the license. + */ + +#include + +#include + +#include +#include + +#include "functions.h" + +#include "compat/compat.h" + +/* + * Vectorized implementation of int4_sum. + */ + +static void +int4_sum_init(Datum *agg_value, bool *agg_isnull) +{ + *agg_value = Int64GetDatum(0); + *agg_isnull = true; +} + +static void +int4_sum_vector(ArrowArray *vector, uint64 *filter, Datum *agg_value, bool *agg_isnull) +{ + Assert(vector != NULL); + Assert(vector->length > 0); + + /* + * We accumulate the sum as int64, so we can sum INT_MAX = 2^31 - 1 + * at least 2^31 times without incurring an overflow of the int64 + * accumulator. The same is true for negative numbers. The + * compressed batch size is currently capped at 1000 rows, but even + * if it's changed in the future, it's unlikely that we support + * batches larger than 65536 rows, not to mention 2^31. Therefore, + * we don't need to check for overflows within the loop, which would + * slow down the calculation. 
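+ * Concretely, 2^31 summands of at most 2^31 - 1 each add up to at most
+ * 2^62 - 2^31, which is still well below the int64 maximum of 2^63 - 1.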
+ */ + Assert(vector->length <= INT_MAX); + + int64 batch_sum = 0; + + /* + * This loop is not unrolled automatically, so do it manually as usual. + * The value buffer is padded to an even multiple of 64 bytes, i.e. to + * 64 / 4 = 16 elements. The bitmap is an even multiple of 64 elements. + * The number of elements in the inner loop must be less than both these + * values so that we don't go out of bounds. The particular value was + * chosen because it gives some speedup, and the larger values blow up + * the generated code with no performance benefit (checked on clang 16). + */ +#define INNER_LOOP_SIZE 4 + const int outer_boundary = pad_to_multiple(INNER_LOOP_SIZE, vector->length); + for (int outer = 0; outer < outer_boundary; outer += INNER_LOOP_SIZE) + { + for (int inner = 0; inner < INNER_LOOP_SIZE; inner++) + { + const int row = outer + inner; + const int32 arrow_value = ((int32 *) vector->buffers[1])[row]; + const bool passes_filter = filter ? arrow_row_is_valid(filter, row) : true; + batch_sum += passes_filter * arrow_value * arrow_row_is_valid(vector->buffers[0], row); + } + } +#undef INNER_LOOP_SIZE + + int64 tmp = DatumGetInt64(*agg_value); + if (unlikely(pg_add_s64_overflow(tmp, batch_sum, &tmp))) + { + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("bigint out of range"))); + } + + /* + * Use Int64GetDatum to store the result since a 64-bit value is not + * pass-by-value on 32-bit systems. + */ + *agg_value = Int64GetDatum(tmp); + *agg_isnull = false; +} + +static void +int4_sum_const(Datum constvalue, bool constisnull, int n, Datum *agg_value, bool *agg_isnull) +{ + Assert(n > 0); + + if (constisnull) + { + return; + } + + int32 intvalue = DatumGetInt32(constvalue); + int64 batch_sum = 0; + + /* Multiply the number of tuples with the actual value */ + if (unlikely(pg_mul_s64_overflow(intvalue, n, &batch_sum))) + { + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("bigint out of range"))); + } + + /* Add the value to our sum */ + int64 tmp = DatumGetInt64(*agg_value); + if (unlikely(pg_add_s64_overflow(tmp, batch_sum, &tmp))) + { + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("bigint out of range"))); + } + + /* + * Use Int64GetDatum to store the result since a 64-bit value is not + * pass-by-value on 32-bit systems. + */ + *agg_value = Int64GetDatum(tmp); + *agg_isnull = false; +} + +static VectorAggregate int4_sum_agg = { + .agg_init = int4_sum_init, + .agg_const = int4_sum_const, + .agg_vector = int4_sum_vector, +}; + +VectorAggregate * +get_vector_aggregate(Oid aggfnoid) +{ + switch (aggfnoid) + { + case F_SUM_INT4: + return &int4_sum_agg; + default: + return NULL; + } +} diff --git a/tsl/src/nodes/vector_agg/functions.h b/tsl/src/nodes/vector_agg/functions.h new file mode 100644 index 00000000000..5f31fbf5960 --- /dev/null +++ b/tsl/src/nodes/vector_agg/functions.h @@ -0,0 +1,27 @@ +/* + * This file and its contents are licensed under the Timescale License. + * Please see the included NOTICE for copyright information and + * LICENSE-TIMESCALE for a copy of the license. + */ + +#pragma once + +#include + +/* + * Function table for a vectorized implementation of an aggregate function. + */ +typedef struct +{ + /* Initialize the aggregate function state pointed to by agg_value and agg_isnull. */ + void (*agg_init)(Datum *agg_value, bool *agg_isnull); + + /* Aggregate a given arrow array. 
*/ + void (*agg_vector)(ArrowArray *vector, uint64 *filter, Datum *agg_value, bool *agg_isnull); + + /* Aggregate a constant (like segmentby or column with default value). */ + void (*agg_const)(Datum constvalue, bool constisnull, int n, Datum *agg_value, + bool *agg_isnull); +} VectorAggregate; + +VectorAggregate *get_vector_aggregate(Oid aggfnoid); diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c new file mode 100644 index 00000000000..28514b47dfa --- /dev/null +++ b/tsl/src/nodes/vector_agg/plan.c @@ -0,0 +1,352 @@ + +/* + * This file and its contents are licensed under the Timescale License. + * Please see the included NOTICE for copyright information and + * LICENSE-TIMESCALE for a copy of the license. + */ + +#include + +#include +#include +#include +#include +#include +#include + +#include "plan.h" + +#include "exec.h" +#include "functions.h" +#include "nodes/decompress_chunk/planner.h" +#include "nodes/vector_agg.h" +#include "utils.h" + +static struct CustomScanMethods scan_methods = { .CustomName = VECTOR_AGG_NODE_NAME, + .CreateCustomScanState = vector_agg_state_create }; + +void +_vector_agg_init(void) +{ + TryRegisterCustomScanMethods(&scan_methods); +} + +/* + * Build an output targetlist for a custom node that just references all the + * custom scan targetlist entries. + */ +static inline List * +build_trivial_custom_output_targetlist(List *scan_targetlist) +{ + List *result = NIL; + + ListCell *lc; + foreach (lc, scan_targetlist) + { + TargetEntry *scan_entry = (TargetEntry *) lfirst(lc); + + Var *var = makeVar(INDEX_VAR, + scan_entry->resno, + exprType((Node *) scan_entry->expr), + exprTypmod((Node *) scan_entry->expr), + exprCollation((Node *) scan_entry->expr), + /* varlevelsup = */ 0); + + TargetEntry *output_entry = makeTargetEntry((Expr *) var, + scan_entry->resno, + scan_entry->resname, + scan_entry->resjunk); + + result = lappend(result, output_entry); + } + + return result; +} + +static Node * +resolve_outer_special_vars_mutator(Node *node, void *context) +{ + if (node == NULL) + { + return NULL; + } + + if (!IsA(node, Var)) + { + return expression_tree_mutator(node, resolve_outer_special_vars_mutator, context); + } + + Var *var = castNode(Var, node); + if (var->varno != OUTER_VAR) + { + return node; + } + + TargetEntry *decompress_chunk_tentry = + castNode(TargetEntry, list_nth(context, var->varattno - 1)); + Var *uncompressed_var = castNode(Var, decompress_chunk_tentry->expr); + return (Node *) copyObject(uncompressed_var); +} + +/* + * Resolve the OUTER_VAR special variables, that are used in the output + * targetlists of aggregation nodes, replacing them with the uncompressed chunk + * variables. + */ +static List * +resolve_outer_special_vars(List *agg_tlist, List *outer_tlist) +{ + return castNode(List, resolve_outer_special_vars_mutator((Node *) agg_tlist, outer_tlist)); +} + +/* + * Create a vectorized aggregation node to replace the given partial aggregation + * node. + */ +static Plan * +vector_agg_plan_create(Agg *agg, CustomScan *decompress_chunk) +{ + CustomScan *custom = (CustomScan *) makeNode(CustomScan); + custom->custom_plans = list_make1(decompress_chunk); + custom->methods = &scan_methods; + + /* + * Note that this is being called from the post-planning hook, and therefore + * after set_plan_refs(). The meaning of output targetlists is different from + * the previous planning stages, and they contain special varnos referencing + * the scan targetlists. 
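+ * For example, an OUTER_VAR with varattno = 1 in the Agg targetlist points
+ * at the first entry of the DecompressChunk output targetlist, and
+ * resolve_outer_special_vars() replaces it with the uncompressed chunk Var
+ * stored in that entry.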
+ */ + custom->custom_scan_tlist = + resolve_outer_special_vars(agg->plan.targetlist, decompress_chunk->scan.plan.targetlist); + custom->scan.plan.targetlist = + build_trivial_custom_output_targetlist(custom->custom_scan_tlist); + + /* + * Copy the costs from the normal aggregation node, so that they show up in + * the EXPLAIN output. They are not used for any other purposes, because + * this hook is called after the planning is finished. + */ + custom->scan.plan.plan_rows = agg->plan.plan_rows; + custom->scan.plan.plan_width = agg->plan.plan_width; + custom->scan.plan.startup_cost = agg->plan.startup_cost; + custom->scan.plan.total_cost = agg->plan.total_cost; + + custom->scan.plan.parallel_aware = false; + custom->scan.plan.parallel_safe = decompress_chunk->scan.plan.parallel_safe; + +#if PG14_GE + custom->scan.plan.async_capable = false; +#endif + + custom->scan.plan.plan_node_id = agg->plan.plan_node_id; + + Assert(agg->plan.qual == NIL); + + custom->scan.plan.initPlan = agg->plan.initPlan; + + custom->scan.plan.extParam = bms_copy(agg->plan.extParam); + custom->scan.plan.allParam = bms_copy(agg->plan.allParam); + + return (Plan *) custom; +} + +/* + * Where possible, replace the partial aggregation plan nodes with our own + * vectorized aggregation node. The replacement is done in-place. + */ +Plan * +try_insert_vector_agg_node(Plan *plan) +{ + if (plan->lefttree) + { + plan->lefttree = try_insert_vector_agg_node(plan->lefttree); + } + + if (plan->righttree) + { + plan->righttree = try_insert_vector_agg_node(plan->righttree); + } + + List *append_plans = NIL; + if (IsA(plan, Append)) + { + append_plans = castNode(Append, plan)->appendplans; + } + else if (IsA(plan, CustomScan)) + { + CustomScan *custom = castNode(CustomScan, plan); + if (strcmp("ChunkAppend", custom->methods->CustomName) == 0) + { + append_plans = custom->custom_plans; + } + } + + if (append_plans) + { + ListCell *lc; + foreach (lc, append_plans) + { + lfirst(lc) = try_insert_vector_agg_node(lfirst(lc)); + } + return plan; + } + + if (plan->type != T_Agg) + { + return plan; + } + + Agg *agg = castNode(Agg, plan); + + if (agg->aggsplit != AGGSPLIT_INITIAL_SERIAL) + { + /* Can only vectorize partial aggregation node. */ + return plan; + } + + if (agg->plan.lefttree == NULL) + { + /* + * Not sure what this would mean, but check for it just to be on the + * safe side because we can effectively see any possible plan here. + */ + return plan; + } + + if (!IsA(agg->plan.lefttree, CustomScan)) + { + /* + * Should have a Custom Scan under aggregation. + */ + return plan; + } + + CustomScan *custom = castNode(CustomScan, agg->plan.lefttree); + if (strcmp(custom->methods->CustomName, "DecompressChunk") != 0) + { + /* + * It should be our DecompressChunk node. + */ + return plan; + } + + if (custom->scan.plan.qual != NIL) + { + /* Can't do vectorized aggregation if we have Postgres quals. */ + return plan; + } + + if (agg->numCols != 0) + { + /* No GROUP BY support for now. */ + return plan; + } + + if (agg->groupingSets != NIL) + { + /* No GROUPING SETS support. */ + return plan; + } + + if (agg->plan.qual != NIL) + { + /* + * No HAVING support. Probably we can't have it in this node in any case, + * because we only replace the partial aggregation nodes which can't + * check the HAVING clause. + */ + return plan; + } + + if (list_length(agg->plan.targetlist) != 1) + { + /* We currently handle only one agg function per node. 
*/ + return plan; + } + + Node *expr_node = (Node *) castNode(TargetEntry, linitial(agg->plan.targetlist))->expr; + Assert(IsA(expr_node, Aggref)); + + Aggref *aggref = castNode(Aggref, expr_node); + + if (aggref->aggfilter != NULL) + { + /* Filter clause on aggregate is not supported. */ + return plan; + } + + if (get_vector_aggregate(aggref->aggfnoid) == NULL) + { + return plan; + } + + TargetEntry *argument = castNode(TargetEntry, linitial(aggref->args)); + if (!IsA(argument->expr, Var)) + { + /* Can aggregate only a bare decompressed column, not an expression. */ + return plan; + } + Var *aggregated_var = castNode(Var, argument->expr); + + /* + * Check if this particular column is a segmentby or has bulk decompression + * enabled. This hook is called after set_plan_refs, and at this stage the + * output targetlist of the aggregation node uses OUTER_VAR references into + * the child scan targetlist, so first we have to translate this. + */ + Assert(aggregated_var->varno == OUTER_VAR); + TargetEntry *decompressed_target_entry = + list_nth(custom->scan.plan.targetlist, AttrNumberGetAttrOffset(aggregated_var->varattno)); + + if (!IsA(decompressed_target_entry->expr, Var)) + { + /* + * Can only aggregate the plain Vars. Not sure if this is redundant with + * the similar check above. + */ + return plan; + } + Var *decompressed_var = castNode(Var, decompressed_target_entry->expr); + + /* + * Now, we have to translate the decompressed varno into the compressed + * column index, to check if the column supports bulk decompression. + */ + List *decompression_map = list_nth(custom->custom_private, DCP_DecompressionMap); + List *is_segmentby_column = list_nth(custom->custom_private, DCP_IsSegmentbyColumn); + List *bulk_decompression_column = list_nth(custom->custom_private, DCP_BulkDecompressionColumn); + int compressed_column_index = 0; + for (; compressed_column_index < list_length(decompression_map); compressed_column_index++) + { + if (list_nth_int(decompression_map, compressed_column_index) == decompressed_var->varattno) + { + break; + } + } + Ensure(compressed_column_index < list_length(decompression_map), "compressed column not found"); + Assert(list_length(decompression_map) == list_length(bulk_decompression_column)); + const bool bulk_decompression_enabled_for_column = + list_nth_int(bulk_decompression_column, compressed_column_index); + + /* Bulk decompression can also be disabled globally. */ + List *settings = linitial(custom->custom_private); + const bool bulk_decompression_enabled_globally = + list_nth_int(settings, DCS_EnableBulkDecompression); + + /* + * We support vectorized aggregation either for segmentby columns or for + * columns with bulk decompression enabled. + */ + if (!list_nth_int(is_segmentby_column, compressed_column_index) && + !(bulk_decompression_enabled_for_column && bulk_decompression_enabled_globally)) + { + /* Vectorized aggregation not possible for this particular column. */ + return plan; + } + + /* + * Finally, all requirements are satisfied and we can vectorize this partial + * aggregation node. + */ + return vector_agg_plan_create(agg, custom); +} diff --git a/tsl/src/nodes/vector_agg/plan.h b/tsl/src/nodes/vector_agg/plan.h new file mode 100644 index 00000000000..653d9d1e1d0 --- /dev/null +++ b/tsl/src/nodes/vector_agg/plan.h @@ -0,0 +1,18 @@ +/* + * This file and its contents are licensed under the Timescale License. + * Please see the included NOTICE for copyright information and + * LICENSE-TIMESCALE for a copy of the license. 
+ */ + +#include + +#include + +typedef struct VectorAggPlan +{ + CustomScan custom; +} VectorAggPlan; + +extern void _vector_agg_init(void); + +Plan *try_insert_vector_agg_node(Plan *plan); diff --git a/tsl/src/partialize_agg.c b/tsl/src/partialize_agg.c deleted file mode 100644 index 8644555e32a..00000000000 --- a/tsl/src/partialize_agg.c +++ /dev/null @@ -1,120 +0,0 @@ -/* - * This file and its contents are licensed under the Timescale License. - * Please see the included NOTICE for copyright information and - * LICENSE-TIMESCALE for a copy of the license. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "compression/compression.h" -#include "nodes/decompress_chunk/decompress_chunk.h" -#include "partialize_agg.h" -#include "utils.h" -#include "debug_assert.h" - -/* - * Are we able to optimize the path by applying vectorized aggregation? - */ -static bool -is_vectorizable_agg_path(PlannerInfo *root, AggPath *agg_path, Path *path) -{ - Assert(agg_path->aggstrategy == AGG_SORTED || agg_path->aggstrategy == AGG_PLAIN || - agg_path->aggstrategy == AGG_HASHED); - - /* Having is not supported at the moment */ - if (root->hasHavingQual) - return false; - - /* Only vectorizing within the decompress node is supported so far */ - bool is_decompress_chunk = ts_is_decompress_chunk_path(path); - if (!is_decompress_chunk) - return false; - -#ifdef USE_ASSERT_CHECKING - DecompressChunkPath *decompress_path = (DecompressChunkPath *) path; - Assert(decompress_path->custom_path.custom_paths != NIL); - - /* Hypertable compression info is already fetched from the catalog */ - Assert(decompress_path->info != NULL); -#endif - - /* No filters on the compressed attributes are supported at the moment */ - if ((list_length(path->parent->baserestrictinfo) > 0 || path->parent->joininfo != NULL)) - return false; - - /* We currently handle only one agg function per node */ - if (list_length(agg_path->path.pathtarget->exprs) != 1) - return false; - - /* Only sum on int 4 is supported at the moment */ - Node *expr_node = linitial(agg_path->path.pathtarget->exprs); - if (!IsA(expr_node, Aggref)) - return false; - - Aggref *aggref = castNode(Aggref, expr_node); - - /* Filter expressions in the aggregate are not supported */ - if (aggref->aggfilter != NULL) - return false; - - if (aggref->aggfnoid != F_SUM_INT4) - return false; - - /* Can aggregate only a bare decompressed column, not an expression. */ - TargetEntry *argument = castNode(TargetEntry, linitial(aggref->args)); - if (!IsA(argument->expr, Var)) - { - return false; - } - - return true; -} - -/* - * Check if we can perform the computation of the aggregate in a vectorized manner directly inside - * of the decompress chunk node. If this is possible, the decompress chunk node will emit partial - * aggregates directly, and there is no need for the PostgreSQL aggregation node on top. 
- */ -bool -apply_vectorized_agg_optimization(PlannerInfo *root, AggPath *aggregation_path, Path *path) -{ - if (!ts_guc_enable_vectorized_aggregation || !ts_guc_enable_bulk_decompression) - return false; - - Assert(path != NULL); - Assert(aggregation_path->aggsplit == AGGSPLIT_INITIAL_SERIAL); - - if (is_vectorizable_agg_path(root, aggregation_path, path)) - { - Assert(ts_is_decompress_chunk_path(path)); - DecompressChunkPath *decompress_path = (DecompressChunkPath *) castNode(CustomPath, path); - - /* Change the output of the path and let the decompress chunk node emit partial aggregates - * directly */ - decompress_path->perform_vectorized_aggregation = true; - decompress_path->custom_path.path.pathtarget = aggregation_path->path.pathtarget; - - /* The decompress chunk node can perform the aggregation directly. No need for a dedicated - * agg node on top. */ - return true; - } - - /* PostgreSQL should handle the aggregation. Regular agg node on top is required. */ - return false; -} diff --git a/tsl/src/planner.c b/tsl/src/planner.c index e90c4651016..c92084923ee 100644 --- a/tsl/src/planner.c +++ b/tsl/src/planner.c @@ -22,6 +22,7 @@ #include "nodes/frozen_chunk_dml/frozen_chunk_dml.h" #include "nodes/decompress_chunk/decompress_chunk.h" #include "nodes/gapfill/gapfill.h" +#include "nodes/vector_agg/plan.h" #include "planner.h" #include @@ -202,3 +203,15 @@ tsl_preprocess_query(Query *parse) constify_cagg_watermark(parse); } } + +/* + * Run plan postprocessing optimizations. + */ +void +tsl_postprocess_plan(PlannedStmt *stmt) +{ + if (ts_guc_enable_vectorized_aggregation) + { + stmt->planTree = try_insert_vector_agg_node(stmt->planTree); + } +} diff --git a/tsl/src/planner.h b/tsl/src/planner.h index d37c32c3bf2..1a53468e872 100644 --- a/tsl/src/planner.h +++ b/tsl/src/planner.h @@ -17,3 +17,4 @@ void tsl_set_rel_pathlist_query(PlannerInfo *, RelOptInfo *, Index, RangeTblEntr void tsl_set_rel_pathlist_dml(PlannerInfo *, RelOptInfo *, Index, RangeTblEntry *, Hypertable *); void tsl_set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte); void tsl_preprocess_query(Query *parse); +void tsl_postprocess_plan(PlannedStmt *stmt); diff --git a/tsl/test/expected/decompress_vector_qual.out b/tsl/test/expected/decompress_vector_qual.out index 8e77da74fc6..d62b2d4f935 100644 --- a/tsl/test/expected/decompress_vector_qual.out +++ b/tsl/test/expected/decompress_vector_qual.out @@ -2,7 +2,7 @@ -- Please see the included NOTICE for copyright information and -- LICENSE-TIMESCALE for a copy of the license. \c :TEST_DBNAME :ROLE_SUPERUSER -create function stable_identity(x anyelement) returns anyelement as $$ select x $$ language sql stable; +create function stable_abs(x int4) returns int4 as 'int4abs' language internal stable; create table vectorqual(metric1 int8, ts timestamp, metric2 int8, device int8); select create_hypertable('vectorqual', 'ts'); WARNING: column type "timestamp without time zone" used for "ts" does not follow best practices @@ -206,7 +206,7 @@ execute p(33); deallocate p; -- Also try query parameter in combination with a stable function. -prepare p(int4) as select count(*) from vectorqual where metric3 = stable_identity($1); +prepare p(int4) as select count(*) from vectorqual where metric3 = stable_abs($1); execute p(33); count ------- @@ -387,7 +387,7 @@ select count(*) from vectorqual where metric3 !!! 666 and (metric3 !!! 777 or me 5 (1 row) -select count(*) from vectorqual where metric3 !!! 666 and (metric3 !!! 777 or metric3 !!! 
stable_identity(888)); +select count(*) from vectorqual where metric3 !!! 666 and (metric3 !!! 777 or metric3 !!! stable_abs(888)); count ------- 5 @@ -465,7 +465,7 @@ select count(*) from vectorqual where metric3 = 777 or metric4 is not null; 4 (1 row) -select count(*) from vectorqual where metric3 = stable_identity(777) or metric4 is null; +select count(*) from vectorqual where metric3 = stable_abs(777) or metric4 is null; count ------- 3 diff --git a/tsl/test/expected/vector_agg_default.out b/tsl/test/expected/vector_agg_default.out new file mode 100644 index 00000000000..58e8185f7e9 --- /dev/null +++ b/tsl/test/expected/vector_agg_default.out @@ -0,0 +1,167 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\c :TEST_DBNAME :ROLE_SUPERUSER +create function stable_abs(x int4) returns int4 as 'int4abs' language internal stable; +create table dvagg(a int, b int); +select create_hypertable('dvagg', 'a', chunk_time_interval => 1000); +NOTICE: adding not-null constraint to column "a" + create_hypertable +-------------------- + (1,public,dvagg,t) +(1 row) + +insert into dvagg select x, x % 5 from generate_series(1, 999) x; +alter table dvagg set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "dvagg" is set to "" +NOTICE: default order by for hypertable "dvagg" is set to "a DESC" +select compress_chunk(show_chunks('dvagg')); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +alter table dvagg add column c int default 7; +insert into dvagg select x, x % 5, 11 from generate_series(1001, 1999) x; +select compress_chunk(show_chunks('dvagg')); +NOTICE: chunk "_hyper_1_1_chunk" is already compressed + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_3_chunk +(2 rows) + +-- Just the most basic vectorized aggregation query on a table with default +-- compressed column. +explain (costs off) select sum(c) from dvagg; + QUERY PLAN +----------------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + -> Parallel Seq Scan on compress_hyper_2_2_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk + -> Parallel Seq Scan on compress_hyper_2_4_chunk +(10 rows) + +select sum(c) from dvagg; + sum +------- + 17982 +(1 row) + +-- Vectorized aggregation should work with vectorized filters. 
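+-- (The vectorized quals produce a per-batch filter bitmap; the sum of the
+-- default column value must then count only the rows that pass it.)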
+select sum(c) from dvagg where b >= 0; + sum +------- + 17982 +(1 row) + +select sum(c) from dvagg where b = 0; + sum +------ + 3582 +(1 row) + +select sum(c) from dvagg where b in (0, 1); + sum +------ + 7182 +(1 row) + +select sum(c) from dvagg where b in (0, 1, 3); + sum +------- + 10782 +(1 row) + +select sum(c) from dvagg where b > 10; + sum +----- + +(1 row) + +explain (costs off) select sum(c) from dvagg where b in (0, 1, 3); + QUERY PLAN +------------------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + Vectorized Filter: (b = ANY ('{0,1,3}'::integer[])) + -> Parallel Seq Scan on compress_hyper_2_2_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk + Vectorized Filter: (b = ANY ('{0,1,3}'::integer[])) + -> Parallel Seq Scan on compress_hyper_2_4_chunk +(12 rows) + +-- The runtime chunk exclusion should work. +explain (costs off) select sum(c) from dvagg where a < stable_abs(1000); + QUERY PLAN +--------------------------------------------------------------------- + Finalize Aggregate + -> Custom Scan (ChunkAppend) on dvagg + Chunks excluded during startup: 1 + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + Vectorized Filter: (a < stable_abs(1000)) + -> Seq Scan on compress_hyper_2_2_chunk +(7 rows) + +-- Some negative cases. +explain (costs off) select sum(c) from dvagg group by grouping sets ((), (a)); + QUERY PLAN +----------------------------------------------------------------------------------- + MixedAggregate + Hash Key: _hyper_1_1_chunk.a + Group Key: () + -> Append + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + -> Sort + Sort Key: compress_hyper_2_2_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_2_2_chunk + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk + -> Sort + Sort Key: compress_hyper_2_4_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_2_4_chunk +(12 rows) + +explain (costs off) select sum(c) from dvagg having sum(c) > 0; + QUERY PLAN +----------------------------------------------------------------------------- + Finalize Aggregate + Filter: (sum(_hyper_1_1_chunk.c) > 0) + -> Gather + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + -> Parallel Seq Scan on compress_hyper_2_2_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk + -> Parallel Seq Scan on compress_hyper_2_4_chunk +(11 rows) + +-- As a reference, the result on decompressed table. +select decompress_chunk(show_chunks('dvagg')); + decompress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_3_chunk +(2 rows) + +select sum(c) from dvagg; + sum +------- + 17982 +(1 row) + +drop table dvagg; diff --git a/tsl/test/expected/vector_agg_param.out b/tsl/test/expected/vector_agg_param.out new file mode 100644 index 00000000000..b481d9c8a97 --- /dev/null +++ b/tsl/test/expected/vector_agg_param.out @@ -0,0 +1,50 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- Test parameterized vector aggregation plans. 
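+-- (The lateral parameter becomes a Filter on the compressed scan below the
+-- VectorAgg node, so the partial aggregate is recomputed on every rescan.)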
+create table pvagg(s int, a int); +select create_hypertable('pvagg', 'a', chunk_time_interval => 1000); +NOTICE: adding not-null constraint to column "a" + create_hypertable +-------------------- + (1,public,pvagg,t) +(1 row) + +insert into pvagg select 1, generate_series(1, 999); +insert into pvagg select 2, generate_series(1001, 1999); +alter table pvagg set (timescaledb.compress, timescaledb.compress_segmentby = 's'); +NOTICE: default order by for hypertable "pvagg" is set to "a DESC" +select count(compress_chunk(x)) from show_chunks('pvagg') x; + count +------- + 2 +(1 row) + +analyze pvagg; +explain (costs off) +select * from unnest(array[0, 1, 2]::int[]) x, lateral (select sum(a) from pvagg where s = x) xx; + QUERY PLAN +--------------------------------------------------------------------------- + Nested Loop + -> Function Scan on unnest x + -> Finalize Aggregate + -> Custom Scan (ChunkAppend) on pvagg + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + -> Seq Scan on compress_hyper_2_3_chunk + Filter: (s = x.x) + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk + -> Seq Scan on compress_hyper_2_4_chunk + Filter: (s = x.x) +(12 rows) + +select * from unnest(array[0, 1, 2]::int[]) x, lateral (select sum(a) from pvagg where s = x) xx; + x | sum +---+--------- + 0 | + 1 | 499500 + 2 | 1498500 +(3 rows) + +drop table pvagg; diff --git a/tsl/test/expected/vectorized_aggregation.out b/tsl/test/expected/vectorized_aggregation.out index 552569895ad..87a9df6a150 100644 --- a/tsl/test/expected/vectorized_aggregation.out +++ b/tsl/test/expected/vectorized_aggregation.out @@ -49,82 +49,91 @@ SELECT sum(segment_by_value) FROM testtable; :EXPLAIN SELECT sum(segment_by_value) FROM testtable; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate Output: sum(_hyper_1_1_chunk.segment_by_value) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, 
compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_4_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_4_chunk - Output: _hyper_1_4_chunk.segment_by_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_5_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_5_chunk - Output: _hyper_1_5_chunk.segment_by_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_6_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_6_chunk - Output: _hyper_1_6_chunk.segment_by_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_7_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_7_chunk - Output: _hyper_1_7_chunk.segment_by_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_8_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_8_chunk - Output: _hyper_1_8_chunk.segment_by_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_9_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_9_chunk - Output: _hyper_1_9_chunk.segment_by_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_10_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_10_chunk - Output: _hyper_1_10_chunk.segment_by_value -(46 rows) + -> Gather + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) + -> Custom Scan 
(DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_4_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.segment_by_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_5_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.segment_by_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_6_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.segment_by_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_7_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.segment_by_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_8_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.segment_by_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_9_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.segment_by_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_10_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.segment_by_value +(52 rows) -- Vectorization possible - filter on segment_by :EXPLAIN SELECT sum(segment_by_value) FROM testtable WHERE segment_by_value > 0; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate Output: sum(_hyper_1_1_chunk.segment_by_value) -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_11_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - Index Cond: (compress_hyper_2_11_chunk.segment_by_value > 0) - -> Custom 
Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value + -> Index Scan using compress_hyper_2_11_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + Index Cond: (compress_hyper_2_11_chunk.segment_by_value > 0) + -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_12_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - Index Cond: (compress_hyper_2_12_chunk.segment_by_value > 0) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.segment_by_value + -> Index Scan using compress_hyper_2_12_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + Index Cond: (compress_hyper_2_12_chunk.segment_by_value > 0) + -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_13_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - Index Cond: (compress_hyper_2_13_chunk.segment_by_value > 0) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.segment_by_value + -> Index Scan using compress_hyper_2_13_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + Index Cond: (compress_hyper_2_13_chunk.segment_by_value > 0) -> Partial Aggregate Output: PARTIAL sum(_hyper_1_4_chunk.segment_by_value) -> Seq Scan on 
_timescaledb_internal._hyper_1_4_chunk
@@ -160,7 +169,7 @@ SELECT sum(segment_by_value) FROM testtable WHERE segment_by_value > 0;
                ->  Seq Scan on _timescaledb_internal._hyper_1_10_chunk
                      Output: _hyper_1_10_chunk.segment_by_value
                      Filter: (_hyper_1_10_chunk.segment_by_value > 0)
-(56 rows)
+(59 rows)
 
 -- Vectorization not possible due to a used filter
 :EXPLAIN
@@ -170,24 +179,24 @@ SELECT sum(segment_by_value) FROM testtable WHERE segment_by_value > 0 AND int_v
  Finalize Aggregate
    Output: sum(_hyper_1_1_chunk.segment_by_value)
    ->  Append
-         ->  Partial Aggregate
-               Output: PARTIAL sum(_hyper_1_1_chunk.segment_by_value)
+         ->  Custom Scan (VectorAgg)
+               Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value))
                ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk
                      Output: _hyper_1_1_chunk.segment_by_value
                      Vectorized Filter: (_hyper_1_1_chunk.int_value > 0)
                      ->  Index Scan using compress_hyper_2_11_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_11_chunk
                            Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value
                            Index Cond: (compress_hyper_2_11_chunk.segment_by_value > 0)
-         ->  Partial Aggregate
-               Output: PARTIAL sum(_hyper_1_2_chunk.segment_by_value)
+         ->  Custom Scan (VectorAgg)
+               Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value))
                ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk
                      Output: _hyper_1_2_chunk.segment_by_value
                      Vectorized Filter: (_hyper_1_2_chunk.int_value > 0)
                      ->  Index Scan using compress_hyper_2_12_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_12_chunk
                            Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value
                            Index Cond: (compress_hyper_2_12_chunk.segment_by_value > 0)
-         ->  Partial Aggregate
-               Output: PARTIAL sum(_hyper_1_3_chunk.segment_by_value)
+         ->  Custom Scan (VectorAgg)
+               Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value))
                ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk
                      Output: _hyper_1_3_chunk.segment_by_value
                      Vectorized Filter: (_hyper_1_3_chunk.int_value > 0)
@@ -241,22 +250,22 @@ SELECT sum(segment_by_value) FROM testtable WHERE int_value > 0;
          Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value))
          Workers Planned: 2
          ->  Parallel Append
-               ->  Partial Aggregate
-                     Output: PARTIAL sum(_hyper_1_1_chunk.segment_by_value)
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value))
                      ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk
                            Output: _hyper_1_1_chunk.segment_by_value
                            Vectorized Filter: (_hyper_1_1_chunk.int_value > 0)
                            ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk
                                  Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value
-               ->  Partial Aggregate
-                     Output: PARTIAL sum(_hyper_1_2_chunk.segment_by_value)
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value))
                      ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk
                            Output: _hyper_1_2_chunk.segment_by_value
                            Vectorized Filter: (_hyper_1_2_chunk.int_value > 0)
                            ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk
                                  Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value
-               ->  Partial Aggregate
-                     Output: PARTIAL sum(_hyper_1_3_chunk.segment_by_value)
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value))
                      ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk
                            Output: _hyper_1_3_chunk.segment_by_value
                            Vectorized Filter: (_hyper_1_3_chunk.int_value > 0)
@@ -309,22 +318,22 @@ SELECT sum(segment_by_value) FROM testtable WHERE float_value > 0;
          Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value))
          Workers Planned: 2
          ->  Parallel Append
-               ->  Partial Aggregate
-                     Output: PARTIAL sum(_hyper_1_1_chunk.segment_by_value)
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value))
                      ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk
                            Output: _hyper_1_1_chunk.segment_by_value
                            Vectorized Filter: (_hyper_1_1_chunk.float_value > '0'::double precision)
                            ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk
                                  Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value
-               ->  Partial Aggregate
-                     Output: PARTIAL sum(_hyper_1_2_chunk.segment_by_value)
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value))
                      ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk
                            Output: _hyper_1_2_chunk.segment_by_value
                            Vectorized Filter: (_hyper_1_2_chunk.float_value > '0'::double precision)
                            ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk
                                  Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value
-               ->  Partial Aggregate
-                     Output: PARTIAL sum(_hyper_1_3_chunk.segment_by_value)
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value))
                      ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk
                            Output: _hyper_1_3_chunk.segment_by_value
                            Vectorized Filter: (_hyper_1_3_chunk.float_value > '0'::double precision)
@@ -651,55 +660,61 @@ SELECT sum(int_value) FROM testtable;
 :EXPLAIN
 SELECT sum(int_value) FROM testtable;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  Finalize Aggregate
    Output: sum(_hyper_1_1_chunk.int_value)
-   ->  Append
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk
-               Output: (PARTIAL sum(_hyper_1_1_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk
-                     Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk
-               Output: (PARTIAL sum(_hyper_1_2_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk
-                     Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk
-               Output: (PARTIAL sum(_hyper_1_3_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk
-                     Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value
-         ->  Partial Aggregate
-               Output: PARTIAL sum(_hyper_1_4_chunk.int_value)
-               ->  Seq Scan on _timescaledb_internal._hyper_1_4_chunk
-                     Output: _hyper_1_4_chunk.int_value
-         ->  Partial Aggregate
-               Output: PARTIAL sum(_hyper_1_5_chunk.int_value)
-               ->  Seq Scan on _timescaledb_internal._hyper_1_5_chunk
-                     Output: _hyper_1_5_chunk.int_value
-         ->  Partial Aggregate
-               Output: PARTIAL sum(_hyper_1_6_chunk.int_value)
-               ->  Seq Scan on _timescaledb_internal._hyper_1_6_chunk
-                     Output: _hyper_1_6_chunk.int_value
-         ->  Partial Aggregate
-               Output: PARTIAL sum(_hyper_1_7_chunk.int_value)
-               ->  Seq Scan on _timescaledb_internal._hyper_1_7_chunk
-                     Output: _hyper_1_7_chunk.int_value
-         ->  Partial Aggregate
-               Output: PARTIAL sum(_hyper_1_8_chunk.int_value)
-               ->  Seq Scan on _timescaledb_internal._hyper_1_8_chunk
-                     Output: _hyper_1_8_chunk.int_value
-         ->  Partial Aggregate
-               Output: PARTIAL sum(_hyper_1_9_chunk.int_value)
-               ->  Seq Scan on _timescaledb_internal._hyper_1_9_chunk
-                     Output: _hyper_1_9_chunk.int_value
-         ->  Partial Aggregate
-               Output: PARTIAL sum(_hyper_1_10_chunk.int_value)
-               ->  Seq Scan on _timescaledb_internal._hyper_1_10_chunk
-                     Output: _hyper_1_10_chunk.int_value
-(46 rows)
+   ->  Gather
+         Output: (PARTIAL sum(_hyper_1_1_chunk.int_value))
+         Workers Planned: 2
+         ->  Parallel Append
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_1_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk
+                           Output: _hyper_1_1_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk
+                                 Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_2_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk
+                           Output: _hyper_1_2_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk
+                                 Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_3_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk
+                           Output: _hyper_1_3_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk
+                                 Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value
+               ->  Partial Aggregate
+                     Output: PARTIAL sum(_hyper_1_4_chunk.int_value)
+                     ->  Parallel Seq Scan on _timescaledb_internal._hyper_1_4_chunk
+                           Output: _hyper_1_4_chunk.int_value
+               ->  Partial Aggregate
+                     Output: PARTIAL sum(_hyper_1_5_chunk.int_value)
+                     ->  Parallel Seq Scan on _timescaledb_internal._hyper_1_5_chunk
+                           Output: _hyper_1_5_chunk.int_value
+               ->  Partial Aggregate
+                     Output: PARTIAL sum(_hyper_1_6_chunk.int_value)
+                     ->  Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk
+                           Output: _hyper_1_6_chunk.int_value
+               ->  Partial Aggregate
+                     Output: PARTIAL sum(_hyper_1_7_chunk.int_value)
+                     ->  Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk
+                           Output: _hyper_1_7_chunk.int_value
+               ->  Partial Aggregate
+                     Output: PARTIAL sum(_hyper_1_8_chunk.int_value)
+                     ->  Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk
+                           Output: _hyper_1_8_chunk.int_value
+               ->  Partial Aggregate
+                     Output: PARTIAL sum(_hyper_1_9_chunk.int_value)
+                     ->  Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk
+                           Output: _hyper_1_9_chunk.int_value
+               ->  Partial Aggregate
+                     Output: PARTIAL sum(_hyper_1_10_chunk.int_value)
+                     ->  Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk
+                           Output: _hyper_1_10_chunk.int_value
+(52 rows)
 
 -- Vectorized aggregation not possible
 SELECT sum(float_value) FROM testtable;
@@ -796,62 +811,75 @@ SELECT sum(segment_by_value) FROM testtable;
 :EXPLAIN
 SELECT sum(segment_by_value) FROM testtable;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 Finalize Aggregate
   Output: sum(_hyper_1_1_chunk.segment_by_value)
-   ->  Append
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk
-               Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk
-                     Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk
-               Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk
-                     Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk
-               Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk
-                     Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk
-               Output: (PARTIAL sum(_hyper_1_4_chunk.segment_by_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk
-                     Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk
-               Output: (PARTIAL sum(_hyper_1_5_chunk.segment_by_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk
-                     Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk
-               Output: (PARTIAL sum(_hyper_1_6_chunk.segment_by_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk
-                     Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk
-               Output: (PARTIAL sum(_hyper_1_7_chunk.segment_by_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk
-                     Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk
-               Output: (PARTIAL sum(_hyper_1_8_chunk.segment_by_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk
-                     Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk
-               Output: (PARTIAL sum(_hyper_1_9_chunk.segment_by_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk
-                     Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk
-               Output: (PARTIAL sum(_hyper_1_10_chunk.segment_by_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk
-                     Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value
-(53 rows)
+   ->  Gather
+         Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value))
+         Workers Planned: 2
+         ->  Parallel Append
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk
+                           Output: _hyper_1_1_chunk.segment_by_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk
+                                 Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk
+                           Output: _hyper_1_2_chunk.segment_by_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk
+                                 Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk
+                           Output: _hyper_1_3_chunk.segment_by_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk
+                                 Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_4_chunk.segment_by_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk
+                           Output: _hyper_1_4_chunk.segment_by_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk
+                                 Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_5_chunk.segment_by_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk
+                           Output: _hyper_1_5_chunk.segment_by_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk
+                                 Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_6_chunk.segment_by_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk
+                           Output: _hyper_1_6_chunk.segment_by_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk
+                                 Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_7_chunk.segment_by_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk
+                           Output: _hyper_1_7_chunk.segment_by_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk
+                                 Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_8_chunk.segment_by_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk
+                           Output: _hyper_1_8_chunk.segment_by_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk
+                                 Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_9_chunk.segment_by_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk
+                           Output: _hyper_1_9_chunk.segment_by_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk
+                                 Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_10_chunk.segment_by_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk
+                           Output: _hyper_1_10_chunk.segment_by_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk
+                                 Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value
+(66 rows)
 
 -- Vectorized aggregation possible
 SELECT sum(int_value) FROM testtable;
@@ -862,62 +890,75 @@ SELECT sum(int_value) FROM testtable;
 :EXPLAIN
 SELECT sum(int_value) FROM testtable;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 Finalize Aggregate
   Output: sum(_hyper_1_1_chunk.int_value)
-   ->  Append
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk
-               Output: (PARTIAL sum(_hyper_1_1_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk
-                     Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk
-               Output: (PARTIAL sum(_hyper_1_2_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk
-                     Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk
-               Output: (PARTIAL sum(_hyper_1_3_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk
-                     Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk
-               Output: (PARTIAL sum(_hyper_1_4_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk
-                     Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk
-               Output: (PARTIAL sum(_hyper_1_5_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk
-                     Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk
-               Output: (PARTIAL sum(_hyper_1_6_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk
-                     Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk
-               Output: (PARTIAL sum(_hyper_1_7_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk
-                     Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk
-               Output: (PARTIAL sum(_hyper_1_8_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk
-                     Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk
-               Output: (PARTIAL sum(_hyper_1_9_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk
-                     Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk
-               Output: (PARTIAL sum(_hyper_1_10_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk
-                     Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value
-(53 rows)
+   ->  Gather
+         Output: (PARTIAL sum(_hyper_1_1_chunk.int_value))
+         Workers Planned: 2
+         ->  Parallel Append
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_1_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk
+                           Output: _hyper_1_1_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk
+                                 Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_2_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk
+                           Output: _hyper_1_2_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk
+                                 Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_3_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk
+                           Output: _hyper_1_3_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk
+                                 Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_4_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk
+                           Output: _hyper_1_4_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk
+                                 Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_5_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk
+                           Output: _hyper_1_5_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk
+                                 Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_6_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk
+                           Output: _hyper_1_6_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk
+                                 Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_7_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk
+                           Output: _hyper_1_7_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk
+                                 Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_8_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk
+                           Output: _hyper_1_8_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk
+                                 Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_9_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk
+                           Output: _hyper_1_9_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk
+                                 Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_10_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk
+                           Output: _hyper_1_10_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk
+                                 Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value
+(66 rows)
 
 ---
 -- Tests with some chunks are partially compressed
@@ -933,66 +974,79 @@ SELECT sum(segment_by_value) FROM testtable;
 :EXPLAIN
 SELECT sum(segment_by_value) FROM testtable;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 Finalize Aggregate
   Output: sum(_hyper_1_1_chunk.segment_by_value)
-   ->  Append
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk
-               Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk
-                     Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value
-         ->  Partial Aggregate
-               Output: PARTIAL sum(_hyper_1_1_chunk.segment_by_value)
-               ->  Seq Scan on _timescaledb_internal._hyper_1_1_chunk
-                     Output: _hyper_1_1_chunk.segment_by_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk
-               Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk
-                     Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk
-               Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk
-                     Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk
-               Output: (PARTIAL sum(_hyper_1_4_chunk.segment_by_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk
-                     Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk
-               Output: (PARTIAL sum(_hyper_1_5_chunk.segment_by_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk
-                     Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk
-               Output: (PARTIAL sum(_hyper_1_6_chunk.segment_by_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk
-                     Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk
-               Output: (PARTIAL sum(_hyper_1_7_chunk.segment_by_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk
-                     Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk
-               Output: (PARTIAL sum(_hyper_1_8_chunk.segment_by_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk
-                     Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk
-               Output: (PARTIAL sum(_hyper_1_9_chunk.segment_by_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk
-                     Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk
-               Output: (PARTIAL sum(_hyper_1_10_chunk.segment_by_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk
-                     Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value
-(57 rows)
+   ->  Gather
+         Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value))
+         Workers Planned: 2
+         ->  Parallel Append
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk
+                           Output: _hyper_1_1_chunk.segment_by_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk
+                                 Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk
+                           Output: _hyper_1_2_chunk.segment_by_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk
+                                 Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk
+                           Output: _hyper_1_3_chunk.segment_by_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk
+                                 Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_4_chunk.segment_by_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk
+                           Output: _hyper_1_4_chunk.segment_by_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk
+                                 Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_5_chunk.segment_by_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk
+                           Output: _hyper_1_5_chunk.segment_by_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk
+                                 Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_6_chunk.segment_by_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk
+                           Output: _hyper_1_6_chunk.segment_by_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk
+                                 Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_7_chunk.segment_by_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk
+                           Output: _hyper_1_7_chunk.segment_by_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk
+                                 Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_8_chunk.segment_by_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk
+                           Output: _hyper_1_8_chunk.segment_by_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk
+                                 Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_9_chunk.segment_by_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk
+                           Output: _hyper_1_9_chunk.segment_by_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk
+                                 Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_10_chunk.segment_by_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk
+                           Output: _hyper_1_10_chunk.segment_by_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk
+                                 Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value
+               ->  Partial Aggregate
+                     Output: PARTIAL sum(_hyper_1_1_chunk.segment_by_value)
+                     ->  Parallel Seq Scan on _timescaledb_internal._hyper_1_1_chunk
+                           Output: _hyper_1_1_chunk.segment_by_value
+(70 rows)
 
 -- Vectorized aggregation possible
 SELECT sum(int_value) FROM testtable;
@@ -1003,66 +1057,79 @@ SELECT sum(int_value) FROM testtable;
 :EXPLAIN
 SELECT sum(int_value) FROM testtable;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 Finalize Aggregate
   Output: sum(_hyper_1_1_chunk.int_value)
-   ->  Append
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk
-               Output: (PARTIAL sum(_hyper_1_1_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk
-                     Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value
-         ->  Partial Aggregate
-               Output: PARTIAL sum(_hyper_1_1_chunk.int_value)
-               ->  Seq Scan on _timescaledb_internal._hyper_1_1_chunk
-                     Output: _hyper_1_1_chunk.int_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk
-               Output: (PARTIAL sum(_hyper_1_2_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk
-                     Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk
-               Output: (PARTIAL sum(_hyper_1_3_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk
-                     Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk
-               Output: (PARTIAL sum(_hyper_1_4_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk
-                     Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk
-               Output: (PARTIAL sum(_hyper_1_5_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk
-                     Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk
-               Output: (PARTIAL sum(_hyper_1_6_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk
-                     Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk
-               Output: (PARTIAL sum(_hyper_1_7_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk
-                     Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk
-               Output: (PARTIAL sum(_hyper_1_8_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk
-                     Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk
-               Output: (PARTIAL sum(_hyper_1_9_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk
-                     Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value
-         ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk
-               Output: (PARTIAL sum(_hyper_1_10_chunk.int_value))
-               Vectorized Aggregation: true
-               ->  Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk
-                     Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value
-(57 rows)
+   ->  Gather
+         Output: (PARTIAL sum(_hyper_1_1_chunk.int_value))
+         Workers Planned: 2
+         ->  Parallel Append
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_1_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk
+                           Output: _hyper_1_1_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk
+                                 Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_2_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk
+                           Output: _hyper_1_2_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk
+                                 Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_3_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk
+                           Output: _hyper_1_3_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk
+                                 Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_4_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk
+                           Output: _hyper_1_4_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk
+                                 Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_5_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk
+                           Output: _hyper_1_5_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk
+                                 Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_6_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk
+                           Output: _hyper_1_6_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk
+                                 Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_7_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk
+                           Output: _hyper_1_7_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk
+                                 Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_8_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk
+                           Output: _hyper_1_8_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk
+                                 Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_9_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk
+                           Output: _hyper_1_9_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk
+                                 Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_10_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk
+                           Output: _hyper_1_10_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk
+                                 Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value
+               ->  Partial Aggregate
+                     Output: PARTIAL sum(_hyper_1_1_chunk.int_value)
+                     ->  Parallel Seq Scan on _timescaledb_internal._hyper_1_1_chunk
+                           Output: _hyper_1_1_chunk.int_value
+(70 rows)
 
 --Vectorized aggregation not possible for expression
 SELECT sum(abs(int_value)) FROM testtable;
@@ -1289,150 +1356,176 @@ SELECT sum(int_value) FROM testtable;
                ->  Partial Aggregate
                      Output: PARTIAL sum(_hyper_1_9_chunk.int_value)
                      ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk
-                           Output: _hyper_1_9_chunk.int_value
+                           Output: _hyper_1_9_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk
+                                 Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value
+               ->  Partial Aggregate
+                     Output: PARTIAL sum(_hyper_1_10_chunk.int_value)
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk
+                           Output: _hyper_1_10_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk
+                                 Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value
+               ->  Partial Aggregate
+                     Output: PARTIAL sum(_hyper_1_1_chunk.int_value)
+                     ->  Parallel Seq Scan on _timescaledb_internal._hyper_1_1_chunk
+                           Output: _hyper_1_1_chunk.int_value
+(70 rows)
+
 RESET timescaledb.enable_bulk_decompression;
 -- Using the same sum function multiple times is supported by vectorization
 :EXPLAIN
 SELECT sum(int_value), sum(int_value) FROM testtable;
 QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 Finalize Aggregate
   Output: sum(_hyper_1_1_chunk.int_value), sum(_hyper_1_1_chunk.int_value)
+   ->  Gather
+         Output: (PARTIAL sum(_hyper_1_1_chunk.int_value))
+         Workers Planned: 2
+         ->  Parallel Append
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_1_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk
+                           Output: _hyper_1_1_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk
+                                 Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_2_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk
+                           Output: _hyper_1_2_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk
+                                 Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_3_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk
+                           Output: _hyper_1_3_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk
+                                 Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_4_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk
+                           Output: _hyper_1_4_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk
+                                 Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_5_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk
+                           Output: _hyper_1_5_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk
+                                 Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_6_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk
+                           Output: _hyper_1_6_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk
+                                 Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_7_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk
+                           Output: _hyper_1_7_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk
+                                 Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_8_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk
+                           Output: _hyper_1_8_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk
+                                 Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_9_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk
+                           Output: _hyper_1_9_chunk.int_value
+                           ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk
+                                 Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_1_10_chunk.int_value))
+                     ->  Custom Scan (DecompressChunk) on
_timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk + Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_1_chunk.int_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.int_value +(70 rows) + +-- Using the same sum function multiple times is supported by vectorization +:EXPLAIN +SELECT sum(segment_by_value), sum(segment_by_value) FROM testtable; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize Aggregate + Output: sum(_hyper_1_1_chunk.segment_by_value), sum(_hyper_1_1_chunk.segment_by_value) + -> Gather + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_4_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.segment_by_value + -> Parallel Seq Scan on 
_timescaledb_internal.compress_hyper_2_14_chunk + Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_5_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk + Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_6_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk + Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_7_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk + Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_8_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk + Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_9_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value - -> Partial Aggregate - Output: PARTIAL 
sum(_hyper_1_10_chunk.int_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_10_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk - Output: _hyper_1_10_chunk.int_value + Output: _hyper_1_10_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_1_chunk.int_value) + Output: PARTIAL sum(_hyper_1_1_chunk.segment_by_value) -> Parallel Seq Scan on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.int_value + Output: _hyper_1_1_chunk.segment_by_value (70 rows) -RESET timescaledb.enable_bulk_decompression; --- Using the same sum function multiple times is supported by vectorization -:EXPLAIN -SELECT sum(int_value), sum(int_value) FROM testtable; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Finalize Aggregate - Output: sum(_hyper_1_1_chunk.int_value), sum(_hyper_1_1_chunk.int_value) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_1_chunk.int_value) - -> Seq Scan on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.int_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: (PARTIAL sum(_hyper_1_2_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: (PARTIAL sum(_hyper_1_3_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> Custom Scan (DecompressChunk) on 
_timescaledb_internal._hyper_1_4_chunk - Output: (PARTIAL sum(_hyper_1_4_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk - Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk - Output: (PARTIAL sum(_hyper_1_5_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk - Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk - Output: (PARTIAL sum(_hyper_1_6_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk - Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk - Output: (PARTIAL sum(_hyper_1_7_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk - Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk - Output: (PARTIAL sum(_hyper_1_8_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk - Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk - Output: (PARTIAL sum(_hyper_1_9_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk - Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk - Output: (PARTIAL sum(_hyper_1_10_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on 
_timescaledb_internal.compress_hyper_2_20_chunk - Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value -(57 rows) - --- Using the same sum function multiple times is supported by vectorization -:EXPLAIN -SELECT sum(segment_by_value), sum(segment_by_value) FROM testtable; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Finalize Aggregate - Output: sum(_hyper_1_1_chunk.segment_by_value), sum(_hyper_1_1_chunk.segment_by_value) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_1_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.segment_by_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk - Output: (PARTIAL sum(_hyper_1_4_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk - Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value - -> Custom Scan (DecompressChunk) on 
_timescaledb_internal._hyper_1_5_chunk - Output: (PARTIAL sum(_hyper_1_5_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk - Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk - Output: (PARTIAL sum(_hyper_1_6_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk - Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk - Output: (PARTIAL sum(_hyper_1_7_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk - Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk - Output: (PARTIAL sum(_hyper_1_8_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk - Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk - Output: (PARTIAL sum(_hyper_1_9_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk - Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk - Output: (PARTIAL sum(_hyper_1_10_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk - Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value -(57 rows) - -- Performing a sum on multiple columns is currently not supported by vectorization :EXPLAIN SELECT sum(int_value), sum(segment_by_value) FROM testtable; @@ 
-1763,72 +1856,85 @@ SELECT sum(int_value) FROM testtable; -- Vectorization possible - filter on segment_by :EXPLAIN SELECT sum(int_value) FROM testtable WHERE segment_by_value > 5; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate Output: sum(_hyper_1_41_chunk.int_value) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_41_chunk - Output: (PARTIAL sum(_hyper_1_41_chunk.int_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_51_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_51_chunk - Output: compress_hyper_2_51_chunk._ts_meta_count, compress_hyper_2_51_chunk._ts_meta_sequence_num, compress_hyper_2_51_chunk.segment_by_value, compress_hyper_2_51_chunk._ts_meta_min_1, compress_hyper_2_51_chunk._ts_meta_max_1, compress_hyper_2_51_chunk."time", compress_hyper_2_51_chunk.int_value, compress_hyper_2_51_chunk.float_value - Index Cond: (compress_hyper_2_51_chunk.segment_by_value > 5) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_42_chunk - Output: (PARTIAL sum(_hyper_1_42_chunk.int_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_52_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_52_chunk - Output: compress_hyper_2_52_chunk._ts_meta_count, compress_hyper_2_52_chunk._ts_meta_sequence_num, compress_hyper_2_52_chunk.segment_by_value, compress_hyper_2_52_chunk._ts_meta_min_1, compress_hyper_2_52_chunk._ts_meta_max_1, compress_hyper_2_52_chunk."time", compress_hyper_2_52_chunk.int_value, compress_hyper_2_52_chunk.float_value - Index Cond: (compress_hyper_2_52_chunk.segment_by_value > 5) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_43_chunk - Output: (PARTIAL sum(_hyper_1_43_chunk.int_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_53_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_53_chunk - Output: compress_hyper_2_53_chunk._ts_meta_count, compress_hyper_2_53_chunk._ts_meta_sequence_num, compress_hyper_2_53_chunk.segment_by_value, compress_hyper_2_53_chunk._ts_meta_min_1, compress_hyper_2_53_chunk._ts_meta_max_1, compress_hyper_2_53_chunk."time", compress_hyper_2_53_chunk.int_value, compress_hyper_2_53_chunk.float_value - Index Cond: (compress_hyper_2_53_chunk.segment_by_value > 5) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_44_chunk - Output: (PARTIAL sum(_hyper_1_44_chunk.int_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_54_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_54_chunk - Output: compress_hyper_2_54_chunk._ts_meta_count, compress_hyper_2_54_chunk._ts_meta_sequence_num, compress_hyper_2_54_chunk.segment_by_value, 
compress_hyper_2_54_chunk._ts_meta_min_1, compress_hyper_2_54_chunk._ts_meta_max_1, compress_hyper_2_54_chunk."time", compress_hyper_2_54_chunk.int_value, compress_hyper_2_54_chunk.float_value - Index Cond: (compress_hyper_2_54_chunk.segment_by_value > 5) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_45_chunk - Output: (PARTIAL sum(_hyper_1_45_chunk.int_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_55_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_55_chunk - Output: compress_hyper_2_55_chunk._ts_meta_count, compress_hyper_2_55_chunk._ts_meta_sequence_num, compress_hyper_2_55_chunk.segment_by_value, compress_hyper_2_55_chunk._ts_meta_min_1, compress_hyper_2_55_chunk._ts_meta_max_1, compress_hyper_2_55_chunk."time", compress_hyper_2_55_chunk.int_value, compress_hyper_2_55_chunk.float_value - Index Cond: (compress_hyper_2_55_chunk.segment_by_value > 5) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_46_chunk - Output: (PARTIAL sum(_hyper_1_46_chunk.int_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_56_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_56_chunk - Output: compress_hyper_2_56_chunk._ts_meta_count, compress_hyper_2_56_chunk._ts_meta_sequence_num, compress_hyper_2_56_chunk.segment_by_value, compress_hyper_2_56_chunk._ts_meta_min_1, compress_hyper_2_56_chunk._ts_meta_max_1, compress_hyper_2_56_chunk."time", compress_hyper_2_56_chunk.int_value, compress_hyper_2_56_chunk.float_value - Index Cond: (compress_hyper_2_56_chunk.segment_by_value > 5) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_47_chunk - Output: (PARTIAL sum(_hyper_1_47_chunk.int_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_57_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_57_chunk - Output: compress_hyper_2_57_chunk._ts_meta_count, compress_hyper_2_57_chunk._ts_meta_sequence_num, compress_hyper_2_57_chunk.segment_by_value, compress_hyper_2_57_chunk._ts_meta_min_1, compress_hyper_2_57_chunk._ts_meta_max_1, compress_hyper_2_57_chunk."time", compress_hyper_2_57_chunk.int_value, compress_hyper_2_57_chunk.float_value - Index Cond: (compress_hyper_2_57_chunk.segment_by_value > 5) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_48_chunk - Output: (PARTIAL sum(_hyper_1_48_chunk.int_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_58_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_58_chunk - Output: compress_hyper_2_58_chunk._ts_meta_count, compress_hyper_2_58_chunk._ts_meta_sequence_num, compress_hyper_2_58_chunk.segment_by_value, compress_hyper_2_58_chunk._ts_meta_min_1, compress_hyper_2_58_chunk._ts_meta_max_1, compress_hyper_2_58_chunk."time", compress_hyper_2_58_chunk.int_value, compress_hyper_2_58_chunk.float_value - Index Cond: (compress_hyper_2_58_chunk.segment_by_value > 5) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_49_chunk - Output: (PARTIAL sum(_hyper_1_49_chunk.int_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_59_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_59_chunk - Output: compress_hyper_2_59_chunk._ts_meta_count, compress_hyper_2_59_chunk._ts_meta_sequence_num, compress_hyper_2_59_chunk.segment_by_value, compress_hyper_2_59_chunk._ts_meta_min_1, 
compress_hyper_2_59_chunk._ts_meta_max_1, compress_hyper_2_59_chunk."time", compress_hyper_2_59_chunk.int_value, compress_hyper_2_59_chunk.float_value - Index Cond: (compress_hyper_2_59_chunk.segment_by_value > 5) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_50_chunk - Output: (PARTIAL sum(_hyper_1_50_chunk.int_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_60_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_60_chunk - Output: compress_hyper_2_60_chunk._ts_meta_count, compress_hyper_2_60_chunk._ts_meta_sequence_num, compress_hyper_2_60_chunk.segment_by_value, compress_hyper_2_60_chunk._ts_meta_min_1, compress_hyper_2_60_chunk._ts_meta_max_1, compress_hyper_2_60_chunk."time", compress_hyper_2_60_chunk.int_value, compress_hyper_2_60_chunk.float_value - Index Cond: (compress_hyper_2_60_chunk.segment_by_value > 5) -(63 rows) + -> Gather + Output: (PARTIAL sum(_hyper_1_41_chunk.int_value)) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_41_chunk.int_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_41_chunk + Output: _hyper_1_41_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_51_chunk + Output: compress_hyper_2_51_chunk._ts_meta_count, compress_hyper_2_51_chunk._ts_meta_sequence_num, compress_hyper_2_51_chunk.segment_by_value, compress_hyper_2_51_chunk._ts_meta_min_1, compress_hyper_2_51_chunk._ts_meta_max_1, compress_hyper_2_51_chunk."time", compress_hyper_2_51_chunk.int_value, compress_hyper_2_51_chunk.float_value + Filter: (compress_hyper_2_51_chunk.segment_by_value > 5) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_42_chunk.int_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_42_chunk + Output: _hyper_1_42_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_52_chunk + Output: compress_hyper_2_52_chunk._ts_meta_count, compress_hyper_2_52_chunk._ts_meta_sequence_num, compress_hyper_2_52_chunk.segment_by_value, compress_hyper_2_52_chunk._ts_meta_min_1, compress_hyper_2_52_chunk._ts_meta_max_1, compress_hyper_2_52_chunk."time", compress_hyper_2_52_chunk.int_value, compress_hyper_2_52_chunk.float_value + Filter: (compress_hyper_2_52_chunk.segment_by_value > 5) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_43_chunk.int_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_43_chunk + Output: _hyper_1_43_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_53_chunk + Output: compress_hyper_2_53_chunk._ts_meta_count, compress_hyper_2_53_chunk._ts_meta_sequence_num, compress_hyper_2_53_chunk.segment_by_value, compress_hyper_2_53_chunk._ts_meta_min_1, compress_hyper_2_53_chunk._ts_meta_max_1, compress_hyper_2_53_chunk."time", compress_hyper_2_53_chunk.int_value, compress_hyper_2_53_chunk.float_value + Filter: (compress_hyper_2_53_chunk.segment_by_value > 5) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_44_chunk.int_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_44_chunk + Output: _hyper_1_44_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_54_chunk + Output: compress_hyper_2_54_chunk._ts_meta_count, compress_hyper_2_54_chunk._ts_meta_sequence_num, compress_hyper_2_54_chunk.segment_by_value, compress_hyper_2_54_chunk._ts_meta_min_1, compress_hyper_2_54_chunk._ts_meta_max_1, 
compress_hyper_2_54_chunk."time", compress_hyper_2_54_chunk.int_value, compress_hyper_2_54_chunk.float_value + Filter: (compress_hyper_2_54_chunk.segment_by_value > 5) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_45_chunk.int_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_45_chunk + Output: _hyper_1_45_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_55_chunk + Output: compress_hyper_2_55_chunk._ts_meta_count, compress_hyper_2_55_chunk._ts_meta_sequence_num, compress_hyper_2_55_chunk.segment_by_value, compress_hyper_2_55_chunk._ts_meta_min_1, compress_hyper_2_55_chunk._ts_meta_max_1, compress_hyper_2_55_chunk."time", compress_hyper_2_55_chunk.int_value, compress_hyper_2_55_chunk.float_value + Filter: (compress_hyper_2_55_chunk.segment_by_value > 5) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_46_chunk.int_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_46_chunk + Output: _hyper_1_46_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_56_chunk + Output: compress_hyper_2_56_chunk._ts_meta_count, compress_hyper_2_56_chunk._ts_meta_sequence_num, compress_hyper_2_56_chunk.segment_by_value, compress_hyper_2_56_chunk._ts_meta_min_1, compress_hyper_2_56_chunk._ts_meta_max_1, compress_hyper_2_56_chunk."time", compress_hyper_2_56_chunk.int_value, compress_hyper_2_56_chunk.float_value + Filter: (compress_hyper_2_56_chunk.segment_by_value > 5) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_47_chunk.int_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_47_chunk + Output: _hyper_1_47_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_57_chunk + Output: compress_hyper_2_57_chunk._ts_meta_count, compress_hyper_2_57_chunk._ts_meta_sequence_num, compress_hyper_2_57_chunk.segment_by_value, compress_hyper_2_57_chunk._ts_meta_min_1, compress_hyper_2_57_chunk._ts_meta_max_1, compress_hyper_2_57_chunk."time", compress_hyper_2_57_chunk.int_value, compress_hyper_2_57_chunk.float_value + Filter: (compress_hyper_2_57_chunk.segment_by_value > 5) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_48_chunk.int_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_48_chunk + Output: _hyper_1_48_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_58_chunk + Output: compress_hyper_2_58_chunk._ts_meta_count, compress_hyper_2_58_chunk._ts_meta_sequence_num, compress_hyper_2_58_chunk.segment_by_value, compress_hyper_2_58_chunk._ts_meta_min_1, compress_hyper_2_58_chunk._ts_meta_max_1, compress_hyper_2_58_chunk."time", compress_hyper_2_58_chunk.int_value, compress_hyper_2_58_chunk.float_value + Filter: (compress_hyper_2_58_chunk.segment_by_value > 5) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_49_chunk.int_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_49_chunk + Output: _hyper_1_49_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_59_chunk + Output: compress_hyper_2_59_chunk._ts_meta_count, compress_hyper_2_59_chunk._ts_meta_sequence_num, compress_hyper_2_59_chunk.segment_by_value, compress_hyper_2_59_chunk._ts_meta_min_1, compress_hyper_2_59_chunk._ts_meta_max_1, compress_hyper_2_59_chunk."time", compress_hyper_2_59_chunk.int_value, compress_hyper_2_59_chunk.float_value + Filter: (compress_hyper_2_59_chunk.segment_by_value > 5) + -> Custom Scan (VectorAgg) + Output: 
(PARTIAL sum(_hyper_1_50_chunk.int_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_50_chunk + Output: _hyper_1_50_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_60_chunk + Output: compress_hyper_2_60_chunk._ts_meta_count, compress_hyper_2_60_chunk._ts_meta_sequence_num, compress_hyper_2_60_chunk.segment_by_value, compress_hyper_2_60_chunk._ts_meta_min_1, compress_hyper_2_60_chunk._ts_meta_max_1, compress_hyper_2_60_chunk."time", compress_hyper_2_60_chunk.int_value, compress_hyper_2_60_chunk.float_value + Filter: (compress_hyper_2_60_chunk.segment_by_value > 5) +(76 rows) SELECT sum(int_value) FROM testtable WHERE segment_by_value > 5; sum @@ -1867,65 +1973,75 @@ SET parallel_setup_cost = 0; SET parallel_tuple_cost = 0; :EXPLAIN SELECT sum(segment_by_value) FROM testtable; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate Output: sum(_hyper_1_41_chunk.segment_by_value) -> Gather Output: (PARTIAL sum(_hyper_1_41_chunk.segment_by_value)) Workers Planned: 2 -> Parallel Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_41_chunk + -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_41_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_51_chunk - Output: compress_hyper_2_51_chunk._ts_meta_count, compress_hyper_2_51_chunk._ts_meta_sequence_num, compress_hyper_2_51_chunk.segment_by_value, compress_hyper_2_51_chunk._ts_meta_min_1, compress_hyper_2_51_chunk._ts_meta_max_1, compress_hyper_2_51_chunk."time", compress_hyper_2_51_chunk.int_value, compress_hyper_2_51_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_42_chunk + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_41_chunk + Output: _hyper_1_41_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_51_chunk + Output: compress_hyper_2_51_chunk._ts_meta_count, compress_hyper_2_51_chunk._ts_meta_sequence_num, compress_hyper_2_51_chunk.segment_by_value, compress_hyper_2_51_chunk._ts_meta_min_1, compress_hyper_2_51_chunk._ts_meta_max_1, compress_hyper_2_51_chunk."time", compress_hyper_2_51_chunk.int_value, compress_hyper_2_51_chunk.float_value + -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_42_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_52_chunk - Output: compress_hyper_2_52_chunk._ts_meta_count, compress_hyper_2_52_chunk._ts_meta_sequence_num, compress_hyper_2_52_chunk.segment_by_value, compress_hyper_2_52_chunk._ts_meta_min_1, compress_hyper_2_52_chunk._ts_meta_max_1, compress_hyper_2_52_chunk."time", compress_hyper_2_52_chunk.int_value, compress_hyper_2_52_chunk.float_value - -> Custom Scan 
(DecompressChunk) on _timescaledb_internal._hyper_1_43_chunk + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_42_chunk + Output: _hyper_1_42_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_52_chunk + Output: compress_hyper_2_52_chunk._ts_meta_count, compress_hyper_2_52_chunk._ts_meta_sequence_num, compress_hyper_2_52_chunk.segment_by_value, compress_hyper_2_52_chunk._ts_meta_min_1, compress_hyper_2_52_chunk._ts_meta_max_1, compress_hyper_2_52_chunk."time", compress_hyper_2_52_chunk.int_value, compress_hyper_2_52_chunk.float_value + -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_43_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_53_chunk - Output: compress_hyper_2_53_chunk._ts_meta_count, compress_hyper_2_53_chunk._ts_meta_sequence_num, compress_hyper_2_53_chunk.segment_by_value, compress_hyper_2_53_chunk._ts_meta_min_1, compress_hyper_2_53_chunk._ts_meta_max_1, compress_hyper_2_53_chunk."time", compress_hyper_2_53_chunk.int_value, compress_hyper_2_53_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_44_chunk + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_43_chunk + Output: _hyper_1_43_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_53_chunk + Output: compress_hyper_2_53_chunk._ts_meta_count, compress_hyper_2_53_chunk._ts_meta_sequence_num, compress_hyper_2_53_chunk.segment_by_value, compress_hyper_2_53_chunk._ts_meta_min_1, compress_hyper_2_53_chunk._ts_meta_max_1, compress_hyper_2_53_chunk."time", compress_hyper_2_53_chunk.int_value, compress_hyper_2_53_chunk.float_value + -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_44_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_54_chunk - Output: compress_hyper_2_54_chunk._ts_meta_count, compress_hyper_2_54_chunk._ts_meta_sequence_num, compress_hyper_2_54_chunk.segment_by_value, compress_hyper_2_54_chunk._ts_meta_min_1, compress_hyper_2_54_chunk._ts_meta_max_1, compress_hyper_2_54_chunk."time", compress_hyper_2_54_chunk.int_value, compress_hyper_2_54_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_45_chunk + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_44_chunk + Output: _hyper_1_44_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_54_chunk + Output: compress_hyper_2_54_chunk._ts_meta_count, compress_hyper_2_54_chunk._ts_meta_sequence_num, compress_hyper_2_54_chunk.segment_by_value, compress_hyper_2_54_chunk._ts_meta_min_1, compress_hyper_2_54_chunk._ts_meta_max_1, compress_hyper_2_54_chunk."time", compress_hyper_2_54_chunk.int_value, compress_hyper_2_54_chunk.float_value + -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_45_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_55_chunk - Output: compress_hyper_2_55_chunk._ts_meta_count, compress_hyper_2_55_chunk._ts_meta_sequence_num, compress_hyper_2_55_chunk.segment_by_value, compress_hyper_2_55_chunk._ts_meta_min_1, compress_hyper_2_55_chunk._ts_meta_max_1, compress_hyper_2_55_chunk."time", compress_hyper_2_55_chunk.int_value, compress_hyper_2_55_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_46_chunk + -> Custom Scan (DecompressChunk) on 
_timescaledb_internal._hyper_1_45_chunk + Output: _hyper_1_45_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_55_chunk + Output: compress_hyper_2_55_chunk._ts_meta_count, compress_hyper_2_55_chunk._ts_meta_sequence_num, compress_hyper_2_55_chunk.segment_by_value, compress_hyper_2_55_chunk._ts_meta_min_1, compress_hyper_2_55_chunk._ts_meta_max_1, compress_hyper_2_55_chunk."time", compress_hyper_2_55_chunk.int_value, compress_hyper_2_55_chunk.float_value + -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_46_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_56_chunk - Output: compress_hyper_2_56_chunk._ts_meta_count, compress_hyper_2_56_chunk._ts_meta_sequence_num, compress_hyper_2_56_chunk.segment_by_value, compress_hyper_2_56_chunk._ts_meta_min_1, compress_hyper_2_56_chunk._ts_meta_max_1, compress_hyper_2_56_chunk."time", compress_hyper_2_56_chunk.int_value, compress_hyper_2_56_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_47_chunk + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_46_chunk + Output: _hyper_1_46_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_56_chunk + Output: compress_hyper_2_56_chunk._ts_meta_count, compress_hyper_2_56_chunk._ts_meta_sequence_num, compress_hyper_2_56_chunk.segment_by_value, compress_hyper_2_56_chunk._ts_meta_min_1, compress_hyper_2_56_chunk._ts_meta_max_1, compress_hyper_2_56_chunk."time", compress_hyper_2_56_chunk.int_value, compress_hyper_2_56_chunk.float_value + -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_47_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_57_chunk - Output: compress_hyper_2_57_chunk._ts_meta_count, compress_hyper_2_57_chunk._ts_meta_sequence_num, compress_hyper_2_57_chunk.segment_by_value, compress_hyper_2_57_chunk._ts_meta_min_1, compress_hyper_2_57_chunk._ts_meta_max_1, compress_hyper_2_57_chunk."time", compress_hyper_2_57_chunk.int_value, compress_hyper_2_57_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_48_chunk + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_47_chunk + Output: _hyper_1_47_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_57_chunk + Output: compress_hyper_2_57_chunk._ts_meta_count, compress_hyper_2_57_chunk._ts_meta_sequence_num, compress_hyper_2_57_chunk.segment_by_value, compress_hyper_2_57_chunk._ts_meta_min_1, compress_hyper_2_57_chunk._ts_meta_max_1, compress_hyper_2_57_chunk."time", compress_hyper_2_57_chunk.int_value, compress_hyper_2_57_chunk.float_value + -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_48_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_58_chunk - Output: compress_hyper_2_58_chunk._ts_meta_count, compress_hyper_2_58_chunk._ts_meta_sequence_num, compress_hyper_2_58_chunk.segment_by_value, compress_hyper_2_58_chunk._ts_meta_min_1, compress_hyper_2_58_chunk._ts_meta_max_1, compress_hyper_2_58_chunk."time", compress_hyper_2_58_chunk.int_value, compress_hyper_2_58_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_49_chunk + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_48_chunk + Output: _hyper_1_48_chunk.segment_by_value + -> Parallel Seq Scan on 
_timescaledb_internal.compress_hyper_2_58_chunk + Output: compress_hyper_2_58_chunk._ts_meta_count, compress_hyper_2_58_chunk._ts_meta_sequence_num, compress_hyper_2_58_chunk.segment_by_value, compress_hyper_2_58_chunk._ts_meta_min_1, compress_hyper_2_58_chunk._ts_meta_max_1, compress_hyper_2_58_chunk."time", compress_hyper_2_58_chunk.int_value, compress_hyper_2_58_chunk.float_value + -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_49_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_59_chunk - Output: compress_hyper_2_59_chunk._ts_meta_count, compress_hyper_2_59_chunk._ts_meta_sequence_num, compress_hyper_2_59_chunk.segment_by_value, compress_hyper_2_59_chunk._ts_meta_min_1, compress_hyper_2_59_chunk._ts_meta_max_1, compress_hyper_2_59_chunk."time", compress_hyper_2_59_chunk.int_value, compress_hyper_2_59_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_50_chunk + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_49_chunk + Output: _hyper_1_49_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_59_chunk + Output: compress_hyper_2_59_chunk._ts_meta_count, compress_hyper_2_59_chunk._ts_meta_sequence_num, compress_hyper_2_59_chunk.segment_by_value, compress_hyper_2_59_chunk._ts_meta_min_1, compress_hyper_2_59_chunk._ts_meta_max_1, compress_hyper_2_59_chunk."time", compress_hyper_2_59_chunk.int_value, compress_hyper_2_59_chunk.float_value + -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_50_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_60_chunk - Output: compress_hyper_2_60_chunk._ts_meta_count, compress_hyper_2_60_chunk._ts_meta_sequence_num, compress_hyper_2_60_chunk.segment_by_value, compress_hyper_2_60_chunk._ts_meta_min_1, compress_hyper_2_60_chunk._ts_meta_max_1, compress_hyper_2_60_chunk."time", compress_hyper_2_60_chunk.int_value, compress_hyper_2_60_chunk.float_value -(56 rows) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_50_chunk + Output: _hyper_1_50_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_60_chunk + Output: compress_hyper_2_60_chunk._ts_meta_count, compress_hyper_2_60_chunk._ts_meta_sequence_num, compress_hyper_2_60_chunk.segment_by_value, compress_hyper_2_60_chunk._ts_meta_min_1, compress_hyper_2_60_chunk._ts_meta_max_1, compress_hyper_2_60_chunk."time", compress_hyper_2_60_chunk.int_value, compress_hyper_2_60_chunk.float_value +(66 rows) SELECT sum(segment_by_value) FROM testtable; sum @@ -2042,121 +2158,147 @@ SELECT compress_chunk(ch) FROM show_chunks('testtable') ch; -- Aggregation with vectorization :EXPLAIN SELECT sum(segment_by_value) FROM testtable; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 Finalize Aggregate
   Output: sum(_hyper_1_81_chunk.segment_by_value)
-   ->  Append
-   [10 per-chunk plans elided: for each of _hyper_1_81_chunk .. _hyper_1_90_chunk, a Custom Scan (DecompressChunk) with Output: (PARTIAL sum(<chunk>.segment_by_value)) and Vectorized Aggregation: true, over a Seq Scan on the matching compress_hyper_2_91_chunk .. compress_hyper_2_100_chunk returning all compressed columns]
-(53 rows)
+   ->  Gather
+         Output: (PARTIAL sum(_hyper_1_81_chunk.segment_by_value))
+         Workers Planned: 2
+         ->  Parallel Append
+   [10 per-chunk plans elided: for each of _hyper_1_81_chunk .. _hyper_1_90_chunk, a Custom Scan (VectorAgg) with Output: (PARTIAL sum(<chunk>.segment_by_value)), over a Custom Scan (DecompressChunk) whose Output is only <chunk>.segment_by_value, over a Parallel Seq Scan on the matching compress_hyper_2_91_chunk .. compress_hyper_2_100_chunk returning all compressed columns]
+(66 rows)
 
 :EXPLAIN SELECT sum(int_value) FROM testtable;
-                                                                        QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------
+                                                                           QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 Finalize Aggregate
   Output: sum(_hyper_1_81_chunk.int_value)
-   ->  Append
-   [10 per-chunk plans elided: for each of _hyper_1_81_chunk .. _hyper_1_90_chunk, a Custom Scan (DecompressChunk) with Output: (PARTIAL sum(<chunk>.int_value)) and Vectorized Aggregation: true, over a Seq Scan on the matching compress_hyper_2_91_chunk .. compress_hyper_2_100_chunk returning all compressed columns]
-(53 rows)
+   ->  Gather
+         Output: (PARTIAL sum(_hyper_1_81_chunk.int_value))
+         Workers Planned: 2
+         ->  Parallel Append
+   [10 per-chunk plans elided: for each of _hyper_1_81_chunk .. _hyper_1_90_chunk, a Custom Scan (VectorAgg) with Output: (PARTIAL sum(<chunk>.int_value)), over a Custom Scan (DecompressChunk) whose Output is only <chunk>.int_value, over a Parallel Seq Scan on the matching compress_hyper_2_91_chunk .. compress_hyper_2_100_chunk returning all compressed columns]
+(66 rows)
 
 SELECT sum(segment_by_value) FROM testtable;
   sum
@@ -2279,7 +2421,7 @@ value2 AS segment_by_value2,
 value1 AS int_value,
 value1 AS float_value
 FROM
-generate_series('1980-01-01 00:00:00-00', '1980-03-01 00:00:00-00', INTERVAL '1 day') AS g1(time),
+generate_series('1980-01-03 00:00:00-00', '1980-03-04 00:00:00-00', INTERVAL '1 day') AS g1(time),
 generate_series(-10, 25, 1) AS g2(value1),
 generate_series(-30, 20, 1) AS g3(value2)
 ORDER BY time;
@@ -2287,7 +2429,7 @@ ORDER BY time;
 SELECT sum(segment_by_value1), sum(segment_by_value2) FROM testtable2;
   sum   |   sum
 --------+---------
- 839970 | -559980
+ 853740 | -569160
 (1 row)
 
 SELECT compress_chunk(ch) FROM show_chunks('testtable2') ch;
@@ -2302,346 +2444,378 @@ SELECT compress_chunk(ch) FROM show_chunks('testtable2') ch;
  _timescaledb_internal._hyper_3_107_chunk
  _timescaledb_internal._hyper_3_108_chunk
  _timescaledb_internal._hyper_3_109_chunk
- _timescaledb_internal._hyper_3_110_chunk
-(10 rows)
+(9 rows)
 
+ANALYZE testtable2;
 :EXPLAIN SELECT sum(segment_by_value1) FROM testtable2;
-                                                                              QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+                                                                                 QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 Finalize Aggregate
   Output: sum(_hyper_3_101_chunk.segment_by_value1)
-   ->  Append
-   [10 per-chunk plans elided: for each of _hyper_3_101_chunk .. _hyper_3_110_chunk, a Custom Scan (DecompressChunk) with Output: (PARTIAL sum(<chunk>.segment_by_value1)) and Vectorized Aggregation: true, over a Seq Scan on the matching compress_hyper_4_111_chunk .. compress_hyper_4_120_chunk returning all compressed columns]
-(53 rows)
+   ->  Gather
+         Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1))
+         Workers Planned: 2
+         ->  Parallel Append
+   [9 per-chunk plans elided: for each of _hyper_3_101_chunk .. _hyper_3_109_chunk, a Custom Scan (VectorAgg) with Output: (PARTIAL sum(<chunk>.segment_by_value1)), over a Custom Scan (DecompressChunk) whose Output is only <chunk>.segment_by_value1, over a Parallel Seq Scan on the matching compress_hyper_4_110_chunk .. compress_hyper_4_118_chunk returning all compressed columns]
+(60 rows)
 
 SELECT sum(segment_by_value1) FROM testtable2;
   sum
 --------
- 839970
+ 853740
 (1 row)
 
 :EXPLAIN SELECT sum(segment_by_value2) FROM testtable2;
-                                                                              QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+                                                                                 QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 Finalize Aggregate
   Output: sum(_hyper_3_101_chunk.segment_by_value2)
-   ->  Append
-   [10 per-chunk plans elided: for each of _hyper_3_101_chunk .. _hyper_3_110_chunk, a Custom Scan (DecompressChunk) with Output: (PARTIAL sum(<chunk>.segment_by_value2)) and Vectorized Aggregation: true, over a Seq Scan on the matching compress_hyper_4_111_chunk .. compress_hyper_4_120_chunk returning all compressed columns]
-(53 rows)
+   ->  Gather
+         Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value2))
+         Workers Planned: 2
+         ->  Parallel Append
+   [9 per-chunk plans elided: for each of _hyper_3_101_chunk .. _hyper_3_109_chunk, a Custom Scan (VectorAgg) with Output: (PARTIAL sum(<chunk>.segment_by_value2)), over a Custom Scan (DecompressChunk) whose Output is only <chunk>.segment_by_value2, over a Parallel Seq Scan on the matching compress_hyper_4_110_chunk .. compress_hyper_4_118_chunk returning all compressed columns]
+(60 rows)
 
 SELECT sum(segment_by_value2) FROM testtable2;
   sum
 ---------
- -559980
+ -569160
 (1 row)
 
 -- Vectorization possible - filter on segment_by
 :EXPLAIN SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 0;
-                                                                              QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+                                                                                 QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 Finalize Aggregate
   Output: sum(_hyper_3_101_chunk.segment_by_value1)
-   ->  Append
-   [10 per-chunk plans elided: for each of _hyper_3_101_chunk .. _hyper_3_110_chunk, a Custom Scan (DecompressChunk) with Output: (PARTIAL sum(<chunk>.segment_by_value1)) and Vectorized Aggregation: true, over an Index Scan using compress_hyper_4_1NN_chunk_segment_by_value1_segment_by_val_idx on the matching compress_hyper_4_111_chunk .. compress_hyper_4_120_chunk with Index Cond: (<compressed chunk>.segment_by_value1 > 0)]
-(63 rows)
+   ->  Gather
+         Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1))
+         Workers Planned: 2
+         ->  Parallel Append
+   [6 per-chunk plans elided: for each of _hyper_3_101_chunk .. _hyper_3_106_chunk, a Custom Scan (VectorAgg) with Output: (PARTIAL sum(<chunk>.segment_by_value1)), over a Custom Scan (DecompressChunk) whose Output is only <chunk>.segment_by_value1, over a Parallel Seq Scan on the matching compress_hyper_4_110_chunk .. compress_hyper_4_115_chunk with Filter: (<compressed chunk>.segment_by_value1 > 0)]
+               ->  Custom Scan (VectorAgg)
+                     Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1))
+                     ->  Custom Scan (DecompressChunk)
on _timescaledb_internal._hyper_3_107_chunk + Output: _hyper_3_107_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk + Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value + Filter: (compress_hyper_4_116_chunk.segment_by_value1 > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk + Output: _hyper_3_108_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk + Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value + Filter: (compress_hyper_4_117_chunk.segment_by_value1 > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk + Output: _hyper_3_109_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk + Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value + Filter: (compress_hyper_4_118_chunk.segment_by_value1 > 0) +(69 rows) :EXPLAIN SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 0 AND segment_by_value2 > 0; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate Output: sum(_hyper_3_101_chunk.segment_by_value1) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk - Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_111_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_111_chunk - Output: compress_hyper_4_111_chunk._ts_meta_count, 
compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value - Index Cond: ((compress_hyper_4_111_chunk.segment_by_value1 > 0) AND (compress_hyper_4_111_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk - Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_112_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_112_chunk - Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value - Index Cond: ((compress_hyper_4_112_chunk.segment_by_value1 > 0) AND (compress_hyper_4_112_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk - Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_113_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_113_chunk - Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value - Index Cond: ((compress_hyper_4_113_chunk.segment_by_value1 > 0) AND (compress_hyper_4_113_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk - Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_114_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_114_chunk - Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value - Index Cond: ((compress_hyper_4_114_chunk.segment_by_value1 > 0) AND (compress_hyper_4_114_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk - Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_115_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_115_chunk - Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, 
compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value - Index Cond: ((compress_hyper_4_115_chunk.segment_by_value1 > 0) AND (compress_hyper_4_115_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk - Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_116_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_116_chunk - Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value - Index Cond: ((compress_hyper_4_116_chunk.segment_by_value1 > 0) AND (compress_hyper_4_116_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk - Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_117_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_117_chunk - Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value - Index Cond: ((compress_hyper_4_117_chunk.segment_by_value1 > 0) AND (compress_hyper_4_117_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk - Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_118_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_118_chunk - Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value - Index Cond: ((compress_hyper_4_118_chunk.segment_by_value1 > 0) AND (compress_hyper_4_118_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk - Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_119_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_119_chunk - Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value - Index Cond: ((compress_hyper_4_119_chunk.segment_by_value1 > 0) AND (compress_hyper_4_119_chunk.segment_by_value2 > 0)) - -> Custom Scan 
(DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk - Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_120_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_120_chunk - Output: compress_hyper_4_120_chunk._ts_meta_count, compress_hyper_4_120_chunk._ts_meta_sequence_num, compress_hyper_4_120_chunk.segment_by_value1, compress_hyper_4_120_chunk.segment_by_value2, compress_hyper_4_120_chunk._ts_meta_min_1, compress_hyper_4_120_chunk._ts_meta_max_1, compress_hyper_4_120_chunk."time", compress_hyper_4_120_chunk.int_value, compress_hyper_4_120_chunk.float_value - Index Cond: ((compress_hyper_4_120_chunk.segment_by_value1 > 0) AND (compress_hyper_4_120_chunk.segment_by_value2 > 0)) -(63 rows) + -> Gather + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk + Output: _hyper_3_101_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_110_chunk + Output: compress_hyper_4_110_chunk._ts_meta_count, compress_hyper_4_110_chunk._ts_meta_sequence_num, compress_hyper_4_110_chunk.segment_by_value1, compress_hyper_4_110_chunk.segment_by_value2, compress_hyper_4_110_chunk._ts_meta_min_1, compress_hyper_4_110_chunk._ts_meta_max_1, compress_hyper_4_110_chunk."time", compress_hyper_4_110_chunk.int_value, compress_hyper_4_110_chunk.float_value + Filter: ((compress_hyper_4_110_chunk.segment_by_value1 > 0) AND (compress_hyper_4_110_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk + Output: _hyper_3_102_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk + Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value + Filter: ((compress_hyper_4_111_chunk.segment_by_value1 > 0) AND (compress_hyper_4_111_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk + Output: _hyper_3_103_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk + Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value + Filter: ((compress_hyper_4_112_chunk.segment_by_value1 > 0) AND (compress_hyper_4_112_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk + Output: 
_hyper_3_104_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk + Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value + Filter: ((compress_hyper_4_113_chunk.segment_by_value1 > 0) AND (compress_hyper_4_113_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk + Output: _hyper_3_105_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk + Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value + Filter: ((compress_hyper_4_114_chunk.segment_by_value1 > 0) AND (compress_hyper_4_114_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk + Output: _hyper_3_106_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk + Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value + Filter: ((compress_hyper_4_115_chunk.segment_by_value1 > 0) AND (compress_hyper_4_115_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk + Output: _hyper_3_107_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk + Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value + Filter: ((compress_hyper_4_116_chunk.segment_by_value1 > 0) AND (compress_hyper_4_116_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk + Output: _hyper_3_108_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk + Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, 
compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value + Filter: ((compress_hyper_4_117_chunk.segment_by_value1 > 0) AND (compress_hyper_4_117_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk + Output: _hyper_3_109_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk + Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value + Filter: ((compress_hyper_4_118_chunk.segment_by_value1 > 0) AND (compress_hyper_4_118_chunk.segment_by_value2 > 0)) +(69 rows) :EXPLAIN SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 0 AND segment_by_value2 > 0 AND 2>1; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate Output: sum(_hyper_3_101_chunk.segment_by_value1) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk - Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_111_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_111_chunk - Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value - Index Cond: ((compress_hyper_4_111_chunk.segment_by_value1 > 0) AND (compress_hyper_4_111_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk - Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_112_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_112_chunk - Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, 
compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value - Index Cond: ((compress_hyper_4_112_chunk.segment_by_value1 > 0) AND (compress_hyper_4_112_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk - Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_113_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_113_chunk - Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value - Index Cond: ((compress_hyper_4_113_chunk.segment_by_value1 > 0) AND (compress_hyper_4_113_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk - Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_114_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_114_chunk - Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value - Index Cond: ((compress_hyper_4_114_chunk.segment_by_value1 > 0) AND (compress_hyper_4_114_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk - Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_115_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_115_chunk - Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value - Index Cond: ((compress_hyper_4_115_chunk.segment_by_value1 > 0) AND (compress_hyper_4_115_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk - Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_116_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_116_chunk - Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value - Index Cond: 
((compress_hyper_4_116_chunk.segment_by_value1 > 0) AND (compress_hyper_4_116_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk - Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_117_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_117_chunk - Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value - Index Cond: ((compress_hyper_4_117_chunk.segment_by_value1 > 0) AND (compress_hyper_4_117_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk - Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_118_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_118_chunk - Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value - Index Cond: ((compress_hyper_4_118_chunk.segment_by_value1 > 0) AND (compress_hyper_4_118_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk - Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_119_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_119_chunk - Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value - Index Cond: ((compress_hyper_4_119_chunk.segment_by_value1 > 0) AND (compress_hyper_4_119_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk - Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_120_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_120_chunk - Output: compress_hyper_4_120_chunk._ts_meta_count, compress_hyper_4_120_chunk._ts_meta_sequence_num, compress_hyper_4_120_chunk.segment_by_value1, compress_hyper_4_120_chunk.segment_by_value2, compress_hyper_4_120_chunk._ts_meta_min_1, compress_hyper_4_120_chunk._ts_meta_max_1, compress_hyper_4_120_chunk."time", compress_hyper_4_120_chunk.int_value, compress_hyper_4_120_chunk.float_value - Index Cond: ((compress_hyper_4_120_chunk.segment_by_value1 > 0) AND (compress_hyper_4_120_chunk.segment_by_value2 > 0)) -(63 rows) + -> Gather + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan 
(VectorAgg) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk + Output: _hyper_3_101_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_110_chunk + Output: compress_hyper_4_110_chunk._ts_meta_count, compress_hyper_4_110_chunk._ts_meta_sequence_num, compress_hyper_4_110_chunk.segment_by_value1, compress_hyper_4_110_chunk.segment_by_value2, compress_hyper_4_110_chunk._ts_meta_min_1, compress_hyper_4_110_chunk._ts_meta_max_1, compress_hyper_4_110_chunk."time", compress_hyper_4_110_chunk.int_value, compress_hyper_4_110_chunk.float_value + Filter: ((compress_hyper_4_110_chunk.segment_by_value1 > 0) AND (compress_hyper_4_110_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk + Output: _hyper_3_102_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk + Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value + Filter: ((compress_hyper_4_111_chunk.segment_by_value1 > 0) AND (compress_hyper_4_111_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk + Output: _hyper_3_103_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk + Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value + Filter: ((compress_hyper_4_112_chunk.segment_by_value1 > 0) AND (compress_hyper_4_112_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk + Output: _hyper_3_104_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk + Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value + Filter: ((compress_hyper_4_113_chunk.segment_by_value1 > 0) AND (compress_hyper_4_113_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk + Output: _hyper_3_105_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk + Output: compress_hyper_4_114_chunk._ts_meta_count, 
compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value + Filter: ((compress_hyper_4_114_chunk.segment_by_value1 > 0) AND (compress_hyper_4_114_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk + Output: _hyper_3_106_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk + Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value + Filter: ((compress_hyper_4_115_chunk.segment_by_value1 > 0) AND (compress_hyper_4_115_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk + Output: _hyper_3_107_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk + Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value + Filter: ((compress_hyper_4_116_chunk.segment_by_value1 > 0) AND (compress_hyper_4_116_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk + Output: _hyper_3_108_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk + Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value + Filter: ((compress_hyper_4_117_chunk.segment_by_value1 > 0) AND (compress_hyper_4_117_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk + Output: _hyper_3_109_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk + Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value + 
Filter: ((compress_hyper_4_118_chunk.segment_by_value1 > 0) AND (compress_hyper_4_118_chunk.segment_by_value2 > 0)) +(69 rows) -- Vectorization not possible filter on segment_by and compressed value -- Disable parallel worker to get deterministic query plans on i386 @@ -2653,86 +2827,78 @@ SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 1000 AND Finalize Aggregate Output: sum(_hyper_3_101_chunk.segment_by_value1) -> Append - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_101_chunk.segment_by_value1) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk Output: _hyper_3_101_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_101_chunk.int_value > 1000) - -> Index Scan using compress_hyper_4_111_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_111_chunk - Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value - Index Cond: (compress_hyper_4_111_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_102_chunk.segment_by_value1) + -> Index Scan using compress_hyper_4_110_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_110_chunk + Output: compress_hyper_4_110_chunk._ts_meta_count, compress_hyper_4_110_chunk._ts_meta_sequence_num, compress_hyper_4_110_chunk.segment_by_value1, compress_hyper_4_110_chunk.segment_by_value2, compress_hyper_4_110_chunk._ts_meta_min_1, compress_hyper_4_110_chunk._ts_meta_max_1, compress_hyper_4_110_chunk."time", compress_hyper_4_110_chunk.int_value, compress_hyper_4_110_chunk.float_value + Index Cond: (compress_hyper_4_110_chunk.segment_by_value1 > 1000) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_102_chunk.int_value > 1000) - -> Index Scan using compress_hyper_4_112_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_112_chunk - Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value - Index Cond: (compress_hyper_4_112_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_103_chunk.segment_by_value1) + -> Index Scan using compress_hyper_4_111_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_111_chunk + Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, 
compress_hyper_4_111_chunk.float_value + Index Cond: (compress_hyper_4_111_chunk.segment_by_value1 > 1000) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_103_chunk.int_value > 1000) - -> Index Scan using compress_hyper_4_113_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_113_chunk - Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value - Index Cond: (compress_hyper_4_113_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_104_chunk.segment_by_value1) + -> Index Scan using compress_hyper_4_112_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_112_chunk + Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value + Index Cond: (compress_hyper_4_112_chunk.segment_by_value1 > 1000) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_104_chunk.int_value > 1000) - -> Index Scan using compress_hyper_4_114_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_114_chunk - Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value - Index Cond: (compress_hyper_4_114_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_105_chunk.segment_by_value1) + -> Index Scan using compress_hyper_4_113_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_113_chunk + Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value + Index Cond: (compress_hyper_4_113_chunk.segment_by_value1 > 1000) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_105_chunk.int_value > 1000) - -> Index Scan using 
compress_hyper_4_115_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_115_chunk - Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value - Index Cond: (compress_hyper_4_115_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_106_chunk.segment_by_value1) + -> Index Scan using compress_hyper_4_114_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_114_chunk + Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value + Index Cond: (compress_hyper_4_114_chunk.segment_by_value1 > 1000) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_106_chunk.int_value > 1000) - -> Index Scan using compress_hyper_4_116_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_116_chunk - Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value - Index Cond: (compress_hyper_4_116_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_107_chunk.segment_by_value1) + -> Index Scan using compress_hyper_4_115_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_115_chunk + Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value + Index Cond: (compress_hyper_4_115_chunk.segment_by_value1 > 1000) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_107_chunk.int_value > 1000) - -> Index Scan using compress_hyper_4_117_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_117_chunk - Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", 
compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value - Index Cond: (compress_hyper_4_117_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_108_chunk.segment_by_value1) + -> Index Scan using compress_hyper_4_116_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_116_chunk + Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value + Index Cond: (compress_hyper_4_116_chunk.segment_by_value1 > 1000) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_108_chunk.int_value > 1000) - -> Index Scan using compress_hyper_4_118_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_118_chunk - Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value - Index Cond: (compress_hyper_4_118_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_109_chunk.segment_by_value1) + -> Index Scan using compress_hyper_4_117_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_117_chunk + Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value + Index Cond: (compress_hyper_4_117_chunk.segment_by_value1 > 1000) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_109_chunk.int_value > 1000) - -> Index Scan using compress_hyper_4_119_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_119_chunk - Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value - Index Cond: (compress_hyper_4_119_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_110_chunk.segment_by_value1) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk - Output: _hyper_3_110_chunk.segment_by_value1 - Vectorized Filter: (_hyper_3_110_chunk.int_value > 1000) - -> Index Scan using 
compress_hyper_4_120_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_120_chunk
-                              Output: compress_hyper_4_120_chunk._ts_meta_count, compress_hyper_4_120_chunk._ts_meta_sequence_num, compress_hyper_4_120_chunk.segment_by_value1, compress_hyper_4_120_chunk.segment_by_value2, compress_hyper_4_120_chunk._ts_meta_min_1, compress_hyper_4_120_chunk._ts_meta_max_1, compress_hyper_4_120_chunk."time", compress_hyper_4_120_chunk.int_value, compress_hyper_4_120_chunk.float_value
-                              Index Cond: (compress_hyper_4_120_chunk.segment_by_value1 > 1000)
-(83 rows)
+                        ->  Index Scan using compress_hyper_4_118_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_118_chunk
+                              Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value
+                              Index Cond: (compress_hyper_4_118_chunk.segment_by_value1 > 1000)
+(75 rows)
 
 RESET max_parallel_workers_per_gather;
 
diff --git a/tsl/test/sql/CMakeLists.txt b/tsl/test/sql/CMakeLists.txt
index 2893addf957..4cbb61570fd 100644
--- a/tsl/test/sql/CMakeLists.txt
+++ b/tsl/test/sql/CMakeLists.txt
@@ -33,9 +33,11 @@ set(TEST_FILES
     partialize_finalize.sql
     policy_generalization.sql
     reorder.sql
-    skip_scan.sql
     size_utils_tsl.sql
+    skip_scan.sql
     transparent_decompression_join_index.sql
+    vector_agg_default.sql
+    vector_agg_param.sql
     vectorized_aggregation.sql)
 
 if(USE_TELEMETRY)
diff --git a/tsl/test/sql/decompress_vector_qual.sql b/tsl/test/sql/decompress_vector_qual.sql
index 074c546282e..1ec27e2979f 100644
--- a/tsl/test/sql/decompress_vector_qual.sql
+++ b/tsl/test/sql/decompress_vector_qual.sql
@@ -4,7 +4,7 @@
 
 \c :TEST_DBNAME :ROLE_SUPERUSER
 
-create function stable_identity(x anyelement) returns anyelement as $$ select x $$ language sql stable;
+create function stable_abs(x int4) returns int4 as 'int4abs' language internal stable;
 
 create table vectorqual(metric1 int8, ts timestamp, metric2 int8, device int8);
 select create_hypertable('vectorqual', 'ts');
@@ -89,7 +89,7 @@ execute p(33);
 deallocate p;
 
 -- Also try query parameter in combination with a stable function.
-prepare p(int4) as select count(*) from vectorqual where metric3 = stable_identity($1);
+prepare p(int4) as select count(*) from vectorqual where metric3 = stable_abs($1);
 execute p(33);
 deallocate p;
 
@@ -164,7 +164,7 @@ select count(*) from vectorqual where metric3 !!! 777;
 select count(*) from vectorqual where metric3 !!! any(array[777, 888]);
 select count(*) from vectorqual where metric3 !!! 777 or metric3 !!! 888;
 select count(*) from vectorqual where metric3 !!! 666 and (metric3 !!! 777 or metric3 !!! 888);
-select count(*) from vectorqual where metric3 !!! 666 and (metric3 !!! 777 or metric3 !!! stable_identity(888));
+select count(*) from vectorqual where metric3 !!! 666 and (metric3 !!! 777 or metric3 !!! stable_abs(888));
 
 set timescaledb.debug_require_vector_qual to 'forbid';
 select count(*) from vectorqual where not metric3 !!! 777;
@@ -187,7 +187,7 @@ set timescaledb.debug_require_vector_qual to 'only';
 select count(*) from vectorqual where metric4 is null;
 select count(*) from vectorqual where metric4 is not null;
 select count(*) from vectorqual where metric3 = 777 or metric4 is not null;
-select count(*) from vectorqual where metric3 = stable_identity(777) or metric4 is null;
+select count(*) from vectorqual where metric3 = stable_abs(777) or metric4 is null;
 
 
 -- Can't vectorize conditions on system columns. Have to check this on a single
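A note on the stable_abs() change above: the old stable_identity() was a `language sql` function, which the Postgres planner can inline, leaving a bare constant in the qual and so no stable-function evaluation to test. A stable `language internal` function such as int4abs is opaque to inlining, so the comparison value stays unknown until execution. A minimal sketch of the pattern follows; stable_wrap, t, and metric are hypothetical names for illustration, not part of the patch:

    create function stable_wrap(x int4) returns int4 as 'int4abs' language internal stable;
    -- This qual cannot be reduced to a plain constant comparison at plan time,
    -- so it reaches the executor as a genuine stable-function comparison.
    select count(*) from t where metric = stable_wrap(777);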
diff --git a/tsl/test/sql/vector_agg_default.sql b/tsl/test/sql/vector_agg_default.sql
new file mode 100644
index 00000000000..71b8ea7ac2a
--- /dev/null
+++ b/tsl/test/sql/vector_agg_default.sql
@@ -0,0 +1,50 @@
+-- This file and its contents are licensed under the Timescale License.
+-- Please see the included NOTICE for copyright information and
+-- LICENSE-TIMESCALE for a copy of the license.
+
+\c :TEST_DBNAME :ROLE_SUPERUSER
+create function stable_abs(x int4) returns int4 as 'int4abs' language internal stable;
+
+create table dvagg(a int, b int);
+select create_hypertable('dvagg', 'a', chunk_time_interval => 1000);
+
+insert into dvagg select x, x % 5 from generate_series(1, 999) x;
+alter table dvagg set (timescaledb.compress);
+select compress_chunk(show_chunks('dvagg'));
+
+alter table dvagg add column c int default 7;
+insert into dvagg select x, x % 5, 11 from generate_series(1001, 1999) x;
+select compress_chunk(show_chunks('dvagg'));
+
+
+-- Just the most basic vectorized aggregation query on a table with a
+-- default-valued compressed column.
+explain (costs off) select sum(c) from dvagg;
+select sum(c) from dvagg;
+
+
+-- Vectorized aggregation should work with vectorized filters.
+select sum(c) from dvagg where b >= 0;
+select sum(c) from dvagg where b = 0;
+select sum(c) from dvagg where b in (0, 1);
+select sum(c) from dvagg where b in (0, 1, 3);
+select sum(c) from dvagg where b > 10;
+
+explain (costs off) select sum(c) from dvagg where b in (0, 1, 3);
+
+
+-- The runtime chunk exclusion should work.
+explain (costs off) select sum(c) from dvagg where a < stable_abs(1000);
+
+
+-- Some negative cases.
+explain (costs off) select sum(c) from dvagg group by grouping sets ((), (a));
+
+explain (costs off) select sum(c) from dvagg having sum(c) > 0;
+
+
+-- As a reference, the result on the decompressed table.
+select decompress_chunk(show_chunks('dvagg'));
+select sum(c) from dvagg;
+
+drop table dvagg;
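As a quick cross-check on the dvagg test above (assuming both inserts run as written and no rows are filtered): the first 999 rows were compressed before column c existed, so they read back the default 7, and the next 999 rows carry an explicit 11. The expected unfiltered sum can therefore be computed by hand:

    -- Hand-computed expectation for sum(c) over all of dvagg:
    -- 999 * 7 + 999 * 11 = 6993 + 10989 = 17982.
    select 999 * 7 + 999 * 11 as expected_sum_c;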
diff --git a/tsl/test/sql/vector_agg_param.sql b/tsl/test/sql/vector_agg_param.sql
new file mode 100644
index 00000000000..491a877556d
--- /dev/null
+++ b/tsl/test/sql/vector_agg_param.sql
@@ -0,0 +1,28 @@
+-- This file and its contents are licensed under the Timescale License.
+-- Please see the included NOTICE for copyright information and
+-- LICENSE-TIMESCALE for a copy of the license.
+
+-- Test parameterized vector aggregation plans.
+
+
+create table pvagg(s int, a int);
+
+select create_hypertable('pvagg', 'a', chunk_time_interval => 1000);
+
+insert into pvagg select 1, generate_series(1, 999);
+insert into pvagg select 2, generate_series(1001, 1999);
+
+alter table pvagg set (timescaledb.compress, timescaledb.compress_segmentby = 's');
+
+select count(compress_chunk(x)) from show_chunks('pvagg') x;
+
+analyze pvagg;
+
+
+explain (costs off)
+select * from unnest(array[0, 1, 2]::int[]) x, lateral (select sum(a) from pvagg where s = x) xx;
+
+select * from unnest(array[0, 1, 2]::int[]) x, lateral (select sum(a) from pvagg where s = x) xx;
+
+
+drop table pvagg;
diff --git a/tsl/test/sql/vectorized_aggregation.sql b/tsl/test/sql/vectorized_aggregation.sql
index b8cc7847f41..6ab98169f87 100644
--- a/tsl/test/sql/vectorized_aggregation.sql
+++ b/tsl/test/sql/vectorized_aggregation.sql
@@ -349,7 +349,7 @@ value2 AS segment_by_value2,
 value1 AS int_value,
 value1 AS float_value
 FROM
-generate_series('1980-01-01 00:00:00-00', '1980-03-01 00:00:00-00', INTERVAL '1 day') AS g1(time),
+generate_series('1980-01-03 00:00:00-00', '1980-03-04 00:00:00-00', INTERVAL '1 day') AS g1(time),
 generate_series(-10, 25, 1) AS g2(value1),
 generate_series(-30, 20, 1) AS g3(value2)
 ORDER BY time;
@@ -359,6 +359,8 @@ SELECT sum(segment_by_value1), sum(segment_by_value2) FROM testtable2;
 
 SELECT compress_chunk(ch) FROM show_chunks('testtable2') ch;
 
+ANALYZE testtable2;
+
 :EXPLAIN
 SELECT sum(segment_by_value1) FROM testtable2;
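Likewise, the pvagg lateral query in vector_agg_param.sql above has a hand-checkable result (assuming the two inserts as written; an ungrouped sum() emits exactly one row per lateral invocation, so x = 0 yields a row with a NULL sum rather than no row):

    -- x = 0 -> no rows with s = 0, sum is NULL
    -- x = 1 -> 1 + 2 + ... + 999        = 999 * 1000 / 2           = 499500
    -- x = 2 -> 1001 + 1002 + ... + 1999 = (1001 + 1999) * 999 / 2  = 1498500
    select 999 * 1000 / 2 as expected_sum_s1, (1001 + 1999) * 999 / 2 as expected_sum_s2;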