From c9b81795d16175b59bd5cc2dc28cea135b5ba60b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=8E=8B=E5=AE=B6=E7=92=87?=
Date: Mon, 16 Oct 2023 17:27:40 +0800
Subject: [PATCH] [querier] modify table of deepflow_system

- run automation test (querier) pass
---
 server/querier/engine/clickhouse/clickhouse.go      | 3 +++
 server/querier/engine/clickhouse/clickhouse_test.go | 6 +++---
 server/querier/engine/clickhouse/common/utils.go    | 3 +++
 server/querier/engine/clickhouse/from.go             | 2 +-
 4 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/server/querier/engine/clickhouse/clickhouse.go b/server/querier/engine/clickhouse/clickhouse.go
index f372584cfb4..ecc04f4d53d 100644
--- a/server/querier/engine/clickhouse/clickhouse.go
+++ b/server/querier/engine/clickhouse/clickhouse.go
@@ -744,6 +744,9 @@ func (e *CHEngine) TransFrom(froms sqlparser.TableExprs) error {
 			// ext_metrics只有metrics表,使用virtual_table_name做过滤区分
 			if e.DB == "ext_metrics" {
 				table = "metrics"
+			} else if e.DB == "deepflow_system" {
+				// deepflow_system only has the deepflow_system table, use virtual_table_name to distinguish the virtual tables
+				table = "deepflow_system"
 			} else if e.DB == chCommon.DB_NAME_PROMETHEUS {
 				whereStmt := Where{}
 				metricIDFilter, err := GetMetricIDFilter(e.DB, e.Table)
diff --git a/server/querier/engine/clickhouse/clickhouse_test.go b/server/querier/engine/clickhouse/clickhouse_test.go
index 95d8e82d3a7..d7bf549b4e8 100644
--- a/server/querier/engine/clickhouse/clickhouse_test.go
+++ b/server/querier/engine/clickhouse/clickhouse_test.go
@@ -159,7 +159,7 @@ var (
 		db: "ext_metrics",
 	}, {
 		input: "select Sum(`metrics.pending`) from `deepflow_server.queue`",
-		output: "SELECT SUM(if(indexOf(metrics_float_names, 'pending')=0,null,metrics_float_values[indexOf(metrics_float_names, 'pending')])) AS `Sum(metrics.pending)` FROM deepflow_system.`deepflow_server.queue` LIMIT 10000",
+		output: "SELECT SUM(if(indexOf(metrics_float_names, 'pending')=0,null,metrics_float_values[indexOf(metrics_float_names, 'pending')])) AS `Sum(metrics.pending)` FROM deepflow_system.`deepflow_system` PREWHERE (virtual_table_name='deepflow_server.queue') LIMIT 10000",
 		db: "deepflow_system",
 	}, {
 		input: "select `k8s.label_0` from l7_flow_log",
@@ -219,11 +219,11 @@ var (
 		db: "flow_metrics",
 	}, {
 		input: "SELECT time(time,5,1,0) as toi, Avg(`metrics.dropped`) AS `Avg(metrics.dropped)` FROM `deepflow_agent_collect_sender` GROUP BY toi ORDER BY toi desc",
-		output: "WITH toStartOfInterval(time, toIntervalSecond(10)) + toIntervalSecond(arrayJoin([0]) * 10) AS `_toi` SELECT toUnixTimestamp(`_toi`) AS `toi`, AVG(if(indexOf(metrics_float_names, 'dropped')=0,null,metrics_float_values[indexOf(metrics_float_names, 'dropped')])) AS `Avg(metrics.dropped)` FROM deepflow_system.`deepflow_agent_collect_sender` GROUP BY `toi` ORDER BY `toi` desc LIMIT 10000",
+		output: "WITH toStartOfInterval(time, toIntervalSecond(10)) + toIntervalSecond(arrayJoin([0]) * 10) AS `_toi` SELECT toUnixTimestamp(`_toi`) AS `toi`, AVG(if(indexOf(metrics_float_names, 'dropped')=0,null,metrics_float_values[indexOf(metrics_float_names, 'dropped')])) AS `Avg(metrics.dropped)` FROM deepflow_system.`deepflow_system` PREWHERE (virtual_table_name='deepflow_agent_collect_sender') GROUP BY `toi` ORDER BY `toi` desc LIMIT 10000",
 		db: "deepflow_system",
 	}, {
 		input: "SELECT time(time,120,1,0) as toi, Avg(`metrics.dropped`) AS `Avg(metrics.dropped)` FROM `deepflow_agent_collect_sender` GROUP BY toi ORDER BY toi desc",
-		output: "WITH toStartOfInterval(time, toIntervalSecond(120)) + toIntervalSecond(arrayJoin([0]) * 120) AS `_toi` SELECT toUnixTimestamp(`_toi`) AS `toi`, AVG(if(indexOf(metrics_float_names, 'dropped')=0,null,metrics_float_values[indexOf(metrics_float_names, 'dropped')])) AS `Avg(metrics.dropped)` FROM deepflow_system.`deepflow_agent_collect_sender` GROUP BY `toi` ORDER BY `toi` desc LIMIT 10000",
+		output: "WITH toStartOfInterval(time, toIntervalSecond(120)) + toIntervalSecond(arrayJoin([0]) * 120) AS `_toi` SELECT toUnixTimestamp(`_toi`) AS `toi`, AVG(if(indexOf(metrics_float_names, 'dropped')=0,null,metrics_float_values[indexOf(metrics_float_names, 'dropped')])) AS `Avg(metrics.dropped)` FROM deepflow_system.`deepflow_system` PREWHERE (virtual_table_name='deepflow_agent_collect_sender') GROUP BY `toi` ORDER BY `toi` desc LIMIT 10000",
 		db: "deepflow_system",
 	}, {
 		input: "SELECT chost_id_0 from l4_flow_log WHERE NOT exist(chost_0) LIMIT 1",
diff --git a/server/querier/engine/clickhouse/common/utils.go b/server/querier/engine/clickhouse/common/utils.go
index 2707a3ebf4b..01e3ce93f4b 100644
--- a/server/querier/engine/clickhouse/common/utils.go
+++ b/server/querier/engine/clickhouse/common/utils.go
@@ -181,6 +181,9 @@ func GetExtTables(db string, ctx context.Context) (values []interface{}) {
 	if db == "ext_metrics" {
 		sql = "SELECT table FROM flow_tag.ext_metrics_custom_field GROUP BY table"
 		chClient.DB = "flow_tag"
+	} else if db == "deepflow_system" {
+		sql = "SELECT table FROM flow_tag.deepflow_system_custom_field GROUP BY table"
+		chClient.DB = "flow_tag"
 	} else {
 		sql = "SHOW TABLES FROM " + db
 	}
diff --git a/server/querier/engine/clickhouse/from.go b/server/querier/engine/clickhouse/from.go
index 1a0591a2725..7d9f1c76147 100644
--- a/server/querier/engine/clickhouse/from.go
+++ b/server/querier/engine/clickhouse/from.go
@@ -32,7 +32,7 @@ func (t *Table) Format(m *view.Model) {
 }
 
 func GetVirtualTableFilter(db, table string) (view.Node, bool) {
-	if db == "ext_metrics" {
+	if db == "ext_metrics" || db == "deepflow_system" {
 		filter := fmt.Sprintf("virtual_table_name='%s'", table)
 		return &view.Expr{Value: "(" + filter + ")"}, true
 	}
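
For reference, a minimal before/after illustration of the rewrite this change produces, taken directly from the updated test expectations above (the table and metric names are just the ones used in the tests; line breaks added for readability only):

    input:  select Sum(`metrics.pending`) from `deepflow_server.queue`
    output: SELECT SUM(if(indexOf(metrics_float_names, 'pending')=0,null,metrics_float_values[indexOf(metrics_float_names, 'pending')])) AS `Sum(metrics.pending)`
            FROM deepflow_system.`deepflow_system`
            PREWHERE (virtual_table_name='deepflow_server.queue')
            LIMIT 10000

That is, deepflow_system queries no longer target one physical table per virtual table; they all read the shared deepflow_system.`deepflow_system` table and select the requested virtual table through the virtual_table_name PREWHERE filter, mirroring the existing ext_metrics handling.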