diff --git a/be/src/pipeline/exec/set_probe_sink_operator.cpp b/be/src/pipeline/exec/set_probe_sink_operator.cpp index 929062ec80b6c5e..a758c2ae3c60023 100644 --- a/be/src/pipeline/exec/set_probe_sink_operator.cpp +++ b/be/src/pipeline/exec/set_probe_sink_operator.cpp @@ -22,6 +22,7 @@ #include #include "pipeline/exec/operator.h" +#include "pipeline/pipeline_task.h" #include "vec/common/hash_table/hash_table_set_probe.h" namespace doris { @@ -70,7 +71,7 @@ Status SetProbeSinkOperatorX::sink(RuntimeState* state, vectorized SCOPED_TIMER(local_state.exec_time_counter()); COUNTER_UPDATE(local_state.rows_input_counter(), (int64_t)in_block->rows()); - uint32_t probe_rows = cast_set(in_block->rows()); + const auto probe_rows = cast_set(in_block->rows()); if (probe_rows > 0) { { SCOPED_TIMER(local_state._extract_probe_data_timer); @@ -93,7 +94,7 @@ Status SetProbeSinkOperatorX::sink(RuntimeState* state, vectorized local_state._shared_state->hash_table_variants->method_variant)); } - if (eos) { + if (eos && !state->get_task()->wake_up_early()) { _finalize_probe(local_state); } return Status::OK(); diff --git a/be/src/util/mem_info.h b/be/src/util/mem_info.h index 5d1d0c75367673a..3fa0fdbd709209d 100644 --- a/be/src/util/mem_info.h +++ b/be/src/util/mem_info.h @@ -107,6 +107,8 @@ class MemInfo { size_t value_size = sizeof(T); if (jemallctl(name.c_str(), &value, &value_size, nullptr, 0) == 0) { return value; + } else { + LOG(WARNING) << fmt::format("Failed, jemallctl get {}", name); } #endif return 0; @@ -123,9 +125,6 @@ class MemInfo { if (err) { LOG(WARNING) << fmt::format("Failed, jemallctl value for {} set to {} (old {})", name, value, old_value); - } else { - LOG(INFO) << fmt::format("Successfully, jemallctl value for {} set to {} (old {})", - name, value, old_value); } } catch (...) 
{ LOG(WARNING) << fmt::format("Exception, jemallctl value for {} set to {} (old {})", @@ -140,8 +139,6 @@ class MemInfo { int err = jemallctl(name.c_str(), nullptr, nullptr, nullptr, 0); if (err) { LOG(WARNING) << fmt::format("Failed, jemallctl action {}", name); - } else { - LOG(INFO) << fmt::format("Successfully, jemallctl action {}", name); } } catch (...) { LOG(WARNING) << fmt::format("Exception, jemallctl action {}", name); @@ -181,8 +178,12 @@ class MemInfo { // Each time this interface is set, all currently unused dirty pages are considered // to have fully decayed, which causes immediate purging of all unused dirty pages unless // the decay time is set to -1 - set_jemallctl_value(fmt::format("arena.{}.dirty_decay_ms", MALLCTL_ARENAS_ALL), - dirty_decay_ms); + // + // NOTE: Using "arena.MALLCTL_ARENAS_ALL.dirty_decay_ms" to modify all arenas will fail or even crash, + // which may be a bug. + for (unsigned i = 0; i < get_jemallctl_value("arenas.narenas"); i++) { + set_jemallctl_value(fmt::format("arena.{}.dirty_decay_ms", i), dirty_decay_ms); + } #endif } diff --git a/be/src/vec/exec/format/table/transactional_hive_reader.cpp b/be/src/vec/exec/format/table/transactional_hive_reader.cpp index 18642ab1218b4d2..caf242700180c5a 100644 --- a/be/src/vec/exec/format/table/transactional_hive_reader.cpp +++ b/be/src/vec/exec/format/table/transactional_hive_reader.cpp @@ -205,6 +205,7 @@ Status TransactionalHiveReader::init_row_filters(const TFileRangeDesc& range, ++num_delete_files; } if (num_delete_rows > 0) { + orc_reader->set_push_down_agg_type(TPushAggOp::NONE); orc_reader->set_delete_rows(&_delete_rows); COUNTER_UPDATE(_transactional_orc_profile.num_delete_files, num_delete_files); COUNTER_UPDATE(_transactional_orc_profile.num_delete_rows, num_delete_rows); diff --git a/docker/thirdparties/docker-compose/kerberos/entrypoint-hive-master-2.sh b/docker/thirdparties/docker-compose/kerberos/entrypoint-hive-master-2.sh index c21460c3a57a0f5..eb95c5cb697619f 100755 --- 
a/docker/thirdparties/docker-compose/kerberos/entrypoint-hive-master-2.sh +++ b/docker/thirdparties/docker-compose/kerberos/entrypoint-hive-master-2.sh @@ -25,12 +25,26 @@ cp /etc/trino/conf/presto-server.keytab /keytabs/other-presto-server.keytab cp /keytabs/update-location.sh /etc/hadoop-init.d/update-location.sh /usr/local/hadoop-run.sh & -sleep 30 +# check health here +echo "Waiting for hadoop to be healthy" + +for i in {1..10}; do + if /usr/local/health.sh; then + echo "Hadoop is healthy" + break + fi + echo "Hadoop is not healthy yet. Retrying in 20 seconds..." + sleep 20 +done + +if [ $i -eq 10 ]; then + echo "Hadoop did not become healthy after 10 attempts. Exiting." + exit 1 +fi echo "Init kerberos test data" kinit -kt /etc/hive/conf/hive.keytab hive/hadoop-master-2@OTHERREALM.COM hive -f /usr/local/sql/create_kerberos_hive_table.sql - -sleep 20 +touch /mnt/SUCCESS tail -f /dev/null diff --git a/docker/thirdparties/docker-compose/kerberos/entrypoint-hive-master.sh b/docker/thirdparties/docker-compose/kerberos/entrypoint-hive-master.sh index 62924992219a1d6..76f49724297a619 100755 --- a/docker/thirdparties/docker-compose/kerberos/entrypoint-hive-master.sh +++ b/docker/thirdparties/docker-compose/kerberos/entrypoint-hive-master.sh @@ -23,12 +23,26 @@ mkdir -p /etc/hadoop-init.d/ cp /etc/trino/conf/* /keytabs/ /usr/local/hadoop-run.sh & -sleep 30 +# check health here +echo "Waiting for hadoop to be healthy" + +for i in {1..10}; do + if /usr/local/health.sh; then + echo "Hadoop is healthy" + break + fi + echo "Hadoop is not healthy yet. Retrying in 20 seconds..." + sleep 20 +done + +if [ $i -eq 10 ]; then + echo "Hadoop did not become healthy after 10 attempts. Exiting." 
+ exit 1 +fi echo "Init kerberos test data" kinit -kt /etc/hive/conf/hive.keytab hive/hadoop-master@LABS.TERADATA.COM hive -f /usr/local/sql/create_kerberos_hive_table.sql - -sleep 20 +touch /mnt/SUCCESS tail -f /dev/null diff --git a/docker/thirdparties/docker-compose/kerberos/health-checks/hadoop-health-check.sh b/docker/thirdparties/docker-compose/kerberos/health-checks/hadoop-health-check.sh index 190fa838d6f64d1..77df431d85ac3ba 100755 --- a/docker/thirdparties/docker-compose/kerberos/health-checks/hadoop-health-check.sh +++ b/docker/thirdparties/docker-compose/kerberos/health-checks/hadoop-health-check.sh @@ -32,6 +32,7 @@ fi FAILED=$(supervisorctl status | grep -v RUNNING || true) if [ "$FAILED" == "" ]; then + echo "All services are running" exit 0 else echo "Some of the services are failing: ${FAILED}" diff --git a/docker/thirdparties/docker-compose/kerberos/health-checks/health.sh b/docker/thirdparties/docker-compose/kerberos/health-checks/health.sh old mode 100644 new mode 100755 index 515f37e36ac9e37..473d7ceaeb6166b --- a/docker/thirdparties/docker-compose/kerberos/health-checks/health.sh +++ b/docker/thirdparties/docker-compose/kerberos/health-checks/health.sh @@ -32,3 +32,4 @@ if test -d "${HEALTH_D}"; then "${health_script}" &>> /var/log/container-health.log || exit 1 done fi +exit 0 diff --git a/docker/thirdparties/docker-compose/kerberos/health-checks/hive-health-check-2.sh b/docker/thirdparties/docker-compose/kerberos/health-checks/hive-health-check-2.sh new file mode 100755 index 000000000000000..854524dac1fcff4 --- /dev/null +++ b/docker/thirdparties/docker-compose/kerberos/health-checks/hive-health-check-2.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +kinit -kt /etc/hive/conf/hive.keytab hive/hadoop-master-2@OTHERREALM.COM +beeline -u "jdbc:hive2://localhost:10000/default;principal=hive/hadoop-master-2@OTHERREALM.COM" -e "show databases;" \ No newline at end of file diff --git a/docker/thirdparties/docker-compose/kerberos/health-checks/hive-health-check.sh b/docker/thirdparties/docker-compose/kerberos/health-checks/hive-health-check.sh new file mode 100755 index 000000000000000..4d3d86f69a25c4a --- /dev/null +++ b/docker/thirdparties/docker-compose/kerberos/health-checks/hive-health-check.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +kinit -kt /etc/hive/conf/hive.keytab hive/hadoop-master@LABS.TERADATA.COM +beeline -u "jdbc:hive2://localhost:10000/default;principal=hive/hadoop-master@LABS.TERADATA.COM" -e "show databases;" \ No newline at end of file diff --git a/docker/thirdparties/docker-compose/kerberos/kerberos.yaml.tpl b/docker/thirdparties/docker-compose/kerberos/kerberos.yaml.tpl index 6aa353f3e0cfc82..e635ed6bb27f344 100644 --- a/docker/thirdparties/docker-compose/kerberos/kerberos.yaml.tpl +++ b/docker/thirdparties/docker-compose/kerberos/kerberos.yaml.tpl @@ -24,13 +24,17 @@ services: - ./sql:/usr/local/sql - ./common/hadoop/apply-config-overrides.sh:/etc/hadoop-init.d/00-apply-config-overrides.sh - ./common/hadoop/hadoop-run.sh:/usr/local/hadoop-run.sh + - ./health-checks/health.sh:/usr/local/health.sh - ./health-checks/hadoop-health-check.sh:/etc/health.d/hadoop-health-check.sh + - ./health-checks/hive-health-check.sh:/etc/health.d/hive-health-check.sh - ./entrypoint-hive-master.sh:/usr/local/entrypoint-hive-master.sh - restart: on-failure hostname: hadoop-master entrypoint: /usr/local/entrypoint-hive-master.sh healthcheck: - test: ./health-checks/health.sh + test: ["CMD", "ls", "/mnt/SUCCESS"] + interval: 20s + timeout: 60s + retries: 120 ports: - "5806:5006" - "8820:8020" @@ -46,17 +50,21 @@ services: image: doristhirdpartydocker/trinodb:hdp3.1-hive-kerberized-2_96 container_name: doris--kerberos2 hostname: hadoop-master-2 - restart: on-failure volumes: - ./two-kerberos-hives:/keytabs - ./sql:/usr/local/sql - ./common/hadoop/apply-config-overrides.sh:/etc/hadoop-init.d/00-apply-config-overrides.sh - ./common/hadoop/hadoop-run.sh:/usr/local/hadoop-run.sh + - ./health-checks/health.sh:/usr/local/health.sh - ./health-checks/hadoop-health-check.sh:/etc/health.d/hadoop-health-check.sh + - ./health-checks/hive-health-check-2.sh:/etc/health.d/hive-health-check-2.sh - ./entrypoint-hive-master-2.sh:/usr/local/entrypoint-hive-master-2.sh entrypoint: /usr/local/entrypoint-hive-master-2.sh 
healthcheck: - test: ./health-checks/health.sh + test: ["CMD", "ls", "/mnt/SUCCESS"] + interval: 20s + timeout: 60s + retries: 120 ports: - "15806:5006" - "18820:8020" diff --git a/docker/thirdparties/run-thirdparties-docker.sh b/docker/thirdparties/run-thirdparties-docker.sh index fd9558eef558b06..e3769025fec62d9 100755 --- a/docker/thirdparties/run-thirdparties-docker.sh +++ b/docker/thirdparties/run-thirdparties-docker.sh @@ -708,6 +708,11 @@ if [[ "${RUN_MINIO}" -eq 1 ]]; then pids["minio"]=$! fi +if [[ "${RUN_KERBEROS}" -eq 1 ]]; then + start_kerberos > start_kerberos.log 2>&1 & + pids["kerberos"]=$! +fi + echo "waiting all dockers starting done" for compose in "${!pids[@]}"; do @@ -727,15 +732,6 @@ for compose in "${!pids[@]}"; do fi done -if [[ "${RUN_KERBEROS}" -eq 1 ]]; then - echo "Starting Kerberos after all other components..." - start_kerberos > start_kerberos.log 2>&1 - if [ $? -ne 0 ]; then - echo "Kerberos startup failed" - cat start_kerberos.log - exit 1 - fi -fi echo "docker started" docker ps -a --format "{{.ID}} | {{.Image}} | {{.Status}}" echo "all dockers started successfully" diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/source/HiveScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/source/HiveScanNode.java index c559570432fb17d..08cf65824472533 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/source/HiveScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/source/HiveScanNode.java @@ -289,12 +289,12 @@ private void getFileSplitByPartitions(HiveMetaStoreCache cache, List expireTime; } diff --git a/regression-test/data/datatype_p0/nested_types/ddl/create_nestedtypes_with_schemachange.out b/regression-test/data/datatype_p0/nested_types/ddl/create_nestedtypes_with_schemachange.out index 5b811095fcbc336..efcecd7595305a1 100644 --- a/regression-test/data/datatype_p0/nested_types/ddl/create_nestedtypes_with_schemachange.out +++ 
b/regression-test/data/datatype_p0/nested_types/ddl/create_nestedtypes_with_schemachange.out @@ -1,5 +1,5 @@ -- This file is automatically generated. You should know what you did if you want to edit this --- !sql -- +-- !master_sql -- col0 bigint No true \N col_array array Yes false \N NONE col2 int No false \N NONE @@ -7,7 +7,7 @@ col3 array Yes false \N NONE col4 map Yes false \N NONE col5 struct Yes false \N NONE --- !sql -- +-- !master_sql -- col0 bigint No true \N col_map map Yes false \N NONE col2 int No false \N NONE @@ -15,7 +15,7 @@ col3 array Yes false \N NONE col4 map Yes false \N NONE col5 struct Yes false \N NONE --- !sql -- +-- !master_sql -- col0 bigint No true \N col_struct struct Yes false \N NONE col2 int No false \N NONE @@ -23,21 +23,21 @@ col3 array Yes false \N NONE col4 map Yes false \N NONE col5 struct Yes false \N NONE --- !sql -- +-- !master_sql -- col0 bigint No true \N col2 int No false \N NONE col3 array Yes false \N NONE col4 map Yes false \N NONE col5 struct Yes false \N NONE --- !sql -- +-- !master_sql -- col0 bigint No true \N col2 int No false \N NONE col3 array Yes false \N NONE col4 map Yes false \N NONE col5 struct Yes false \N NONE --- !sql -- +-- !master_sql -- col0 bigint No true \N col2 int No false \N NONE col3 array Yes false \N NONE @@ -47,7 +47,7 @@ col5 struct Yes false \N NONE -- !sql_before -- 1 2 [1, 2] {1:2} {"f1":1} {"a":[1,2,3]} --- !sql -- +-- !master_sql -- col0 bigint No true \N col2 int No false \N NONE col3 array Yes false \N NONE @@ -61,7 +61,7 @@ col6 variant Yes false \N NONE -- !sql_before -- 1 2 [1, 2] {1:2} {"f1":1} {"a":[1,2,3]} --- !sql -- +-- !master_sql -- col0 bigint No true \N col2 int No false \N NONE col3 array Yes false \N NONE @@ -75,7 +75,7 @@ col6 variant No false \N NONE -- !sql_before -- 1 2 [1, 2] {1:2} {"f1":1} {"a":[1,2,3]} --- !sql -- +-- !master_sql -- col0 bigint No true \N col2 int No false \N NONE col3 array Yes false \N NONE @@ -89,7 +89,7 @@ col6 variant Yes false \N NONE -- 
!sql_before -- 1 2 [1, 2] {1:2} {"f1":1} {"a":[1,2,3]} --- !sql -- +-- !master_sql -- col0 bigint No true \N col2 int No false \N NONE col3 array No false \N NONE @@ -103,7 +103,7 @@ col6 variant No false \N NONE -- !sql_before -- 1 2 [1, 2] {1:2} {"f1":1} {"a":[1,2,3]} --- !sql -- +-- !master_sql -- col0 bigint No true \N col2 int No false \N NONE col3 array Yes false \N NONE @@ -117,7 +117,7 @@ col6 variant Yes false \N NONE -- !sql_before -- 1 2 [1, 2] {1:2} {"f1":1} {"a":[1,2,3]} --- !sql -- +-- !master_sql -- col0 bigint No true \N col2 int No false \N NONE col3 array No false \N NONE diff --git a/regression-test/data/external_table_p0/hive/test_transactional_hive.out b/regression-test/data/external_table_p0/hive/test_transactional_hive.out index 060fa8c048e5a05..94e32a43db7f3bb 100644 --- a/regression-test/data/external_table_p0/hive/test_transactional_hive.out +++ b/regression-test/data/external_table_p0/hive/test_transactional_hive.out @@ -122,3 +122,18 @@ F -- !16 -- 4 DD +-- !count_1 -- +3 + +-- !count_2 -- +6 + +-- !count_3 -- +4 + +-- !count_4 -- +3 + +-- !count_5 -- +3 + diff --git a/regression-test/data/external_table_p2/hive/test_hive_translation_insert_only.out b/regression-test/data/external_table_p2/hive/test_hive_translation_insert_only.out index e4bdb3fe32d44bc..f43a630f4a387c1 100644 --- a/regression-test/data/external_table_p2/hive/test_hive_translation_insert_only.out +++ b/regression-test/data/external_table_p2/hive/test_hive_translation_insert_only.out @@ -19,3 +19,12 @@ 4 D 5 E +-- !count_1 -- +4 + +-- !count_2 -- +5 + +-- !count_3 -- +5 + diff --git a/regression-test/data/schema_change_p0/modify_col_type_agg/schema_change_modify_mv_column_type_agg.out b/regression-test/data/schema_change_p0/modify_col_type_agg/schema_change_modify_mv_column_type_agg.out index c86341a56862c3e..b1feac9a13a341f 100644 --- a/regression-test/data/schema_change_p0/modify_col_type_agg/schema_change_modify_mv_column_type_agg.out +++ 
b/regression-test/data/schema_change_p0/modify_col_type_agg/schema_change_modify_mv_column_type_agg.out @@ -28,7 +28,7 @@ mv_tbl_scalar_types_agg_1 AGG_KEYS mv_k2 bigint bigint Yes true \N true `k2` -- !sql -- --- !sql -- +-- !master_sql -- tbl_scalar_types_agg AGG_KEYS k1 bigint bigint Yes true \N true k2 bigint bigint Yes true \N true c_bool boolean boolean Yes false \N REPLACE true diff --git a/regression-test/data/schema_change_p0/modify_col_type_dup/schema_change_modify_mv_column_type.out b/regression-test/data/schema_change_p0/modify_col_type_dup/schema_change_modify_mv_column_type.out index e7d4c6754770489..3503eca20850cff 100644 --- a/regression-test/data/schema_change_p0/modify_col_type_dup/schema_change_modify_mv_column_type.out +++ b/regression-test/data/schema_change_p0/modify_col_type_dup/schema_change_modify_mv_column_type.out @@ -62,7 +62,7 @@ mv_tbl_scalar_types_dup_1 DUP_KEYS mv_c_tinyint tinyint tinyint Yes true \N tru -2106969609 true 10 29572 16738 1736115820 -957295886 -13319.206 -1.333603562816737E9 91224478600376111.942 69457425159617037.453 2022-09-06 2022-05-08T19:52:36 2022-04-05 2022-08-17T19:23:31 222.79.139.99 WalterFox@Voomm.net Oxford Alley 77 -2102307005 true 10 -23674 24613 -1810828490 -47095409 -14686.167 2.072108685694799E9 39847820962230526.125 584354832299375.156 2022-03-27 2022-02-11T13:46:06 2022-12-25 2022-11-28T09:37:49 213.146.33.250 JuliaSimmons@Zazio.info Eagle Crest Terrace 84 --- !sql -- +-- !master_sql -- tbl_scalar_types_dup DUP_KEYS k1 bigint bigint Yes true \N true c_bool boolean boolean Yes false \N NONE true c_tinyint tinyint tinyint Yes false \N NONE true diff --git a/regression-test/data/schema_change_p0/modify_col_type_dup2/schema_change_modify_mv_column_type2.out b/regression-test/data/schema_change_p0/modify_col_type_dup2/schema_change_modify_mv_column_type2.out index 5c0ee14eb32667b..024ed9d867e2019 100644 --- 
a/regression-test/data/schema_change_p0/modify_col_type_dup2/schema_change_modify_mv_column_type2.out +++ b/regression-test/data/schema_change_p0/modify_col_type_dup2/schema_change_modify_mv_column_type2.out @@ -1,5 +1,5 @@ -- This file is automatically generated. You should know what you did if you want to edit this --- !sql -- +-- !master_sql -- tbl_scalar_types_dup DUP_KEYS k1 bigint bigint Yes true \N true c_bool boolean boolean Yes false \N NONE true c_tinyint tinyint tinyint Yes false \N NONE true diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy index 3995371db952da6..5887fd607d44a91 100644 --- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy +++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy @@ -1292,7 +1292,7 @@ class Suite implements GroovyInterceptable { } else if (tag.contains("target_sql")) { tupleResult = JdbcUtils.executeToStringList(context.getTargetConnection(this), (String) arg) } else if (tag.contains("master_sql")) { - tupleResult = JdbcUtils.executeToStringList(context.getMasterConnection(), (PreparedStatement) arg) + tupleResult = JdbcUtils.executeToStringList(context.getMasterConnection(), (String) arg) } else { tupleResult = JdbcUtils.executeToStringList(context.getConnection(), (String) arg) } diff --git a/regression-test/suites/compaction/test_cumu_compaction_with_delete.groovy b/regression-test/suites/compaction/test_cumu_compaction_with_delete.groovy index 7ad5adf069ed7f0..4ac3953f55a0f1b 100644 --- a/regression-test/suites/compaction/test_cumu_compaction_with_delete.groovy +++ b/regression-test/suites/compaction/test_cumu_compaction_with_delete.groovy @@ -69,6 +69,11 @@ suite("test_cumu_compaction_with_delete") { break; } Thread.sleep(10000) + + def duration = System.currentTimeMillis() - now + if(duration > 10 
* 60 * 1000) { + assertTrue(false) + } } def time_diff = System.currentTimeMillis() - now logger.info("time_diff:" + time_diff) @@ -108,6 +113,11 @@ suite("test_cumu_compaction_with_delete") { break; } Thread.sleep(10000) + + def duration = System.currentTimeMillis() - now + if(duration > 10 * 60 * 1000) { + assertTrue(false) + } } def time_diff = System.currentTimeMillis() - now logger.info("time_diff:" + time_diff) diff --git a/regression-test/suites/datatype_p0/nested_types/ddl/create_nestedtypes_with_schemachange.groovy b/regression-test/suites/datatype_p0/nested_types/ddl/create_nestedtypes_with_schemachange.groovy index 62b9ab3bb567f47..26db35cbbe04bca 100644 --- a/regression-test/suites/datatype_p0/nested_types/ddl/create_nestedtypes_with_schemachange.groovy +++ b/regression-test/suites/datatype_p0/nested_types/ddl/create_nestedtypes_with_schemachange.groovy @@ -45,7 +45,7 @@ suite("create_nestedtypes_with_schemachange", "p0") { } } // desc table - qt_sql "DESC $testTablex" + qt_master_sql "DESC $testTablex" } // array @@ -102,7 +102,7 @@ suite("create_nestedtypes_with_schemachange", "p0") { } } // desc table - qt_sql "DESC $testTablex" + qt_master_sql "DESC $testTablex" qt_sql_after "select * from $testTablex" } diff --git a/regression-test/suites/external_table_p0/hive/test_transactional_hive.groovy b/regression-test/suites/external_table_p0/hive/test_transactional_hive.groovy index 4f7008ec1726fa7..a12ab8a4f78cccf 100644 --- a/regression-test/suites/external_table_p0/hive/test_transactional_hive.groovy +++ b/regression-test/suites/external_table_p0/hive/test_transactional_hive.groovy @@ -114,6 +114,14 @@ suite("test_transactional_hive", "p0,external,hive,external_docker,external_dock } } + + def test_acid_count = { + qt_count_1 """ select count(*) from orc_full_acid; """ // 3 + qt_count_2 """ select count(*) from orc_full_acid_par; """ // 6 + qt_count_3 """ select count(*) from orc_to_acid_compacted_tb; """ //4 + qt_count_4 """ select count(*) from 
orc_acid_minor; """ //3 + qt_count_5 """ select count(*) from orc_acid_major; """ //3 + } String enabled = context.config.otherConfigs.get("enableHiveTest") @@ -148,6 +156,10 @@ suite("test_transactional_hive", "p0,external,hive,external_docker,external_dock test_acid() test_acid_write() + + test_acid_count() + + sql """drop catalog if exists ${catalog_name}""" } finally { } diff --git a/regression-test/suites/external_table_p2/hive/test_hive_translation_insert_only.groovy b/regression-test/suites/external_table_p2/hive/test_hive_translation_insert_only.groovy index 758417c32373b5f..f7135175152ce9a 100644 --- a/regression-test/suites/external_table_p2/hive/test_hive_translation_insert_only.groovy +++ b/regression-test/suites/external_table_p2/hive/test_hive_translation_insert_only.groovy @@ -45,6 +45,11 @@ suite("test_hive_translation_insert_only", "p2,external,hive,external_remote,ext qt_2 """ select * from parquet_insert_only_major order by id """ qt_3 """ select * from orc_insert_only_minor order by id """ + qt_count_1 """ select count(*) from text_insert_only """ //4 + qt_count_2 """ select count(*) from parquet_insert_only_major """ //5 + qt_count_3 """ select count(*) from orc_insert_only_minor """ //5 + + sql """drop catalog ${hms_catalog_name};""" } diff --git a/regression-test/suites/fault_injection_p0/test_memtable_flush_is_high_priority_for_vtablet_writerV2.groovy b/regression-test/suites/fault_injection_p0/test_memtable_flush_is_high_priority_for_vtablet_writerV2.groovy index e91a06e2cf0b4c6..42e9fb865e85365 100644 --- a/regression-test/suites/fault_injection_p0/test_memtable_flush_is_high_priority_for_vtablet_writerV2.groovy +++ b/regression-test/suites/fault_injection_p0/test_memtable_flush_is_high_priority_for_vtablet_writerV2.groovy @@ -58,7 +58,7 @@ suite("test_memtable_flush_is_high_priority_for_vtablet_writerV2", "nonConcurren qt_sql """select * from ${testTable} order by id""" sql """set enable_memtable_on_sink_node = ${original_status}""" } 
catch(Exception e) { - log.error(e.message()) + log.error(e.getMessage()) } finally { GetDebugPoint().disableDebugPointForAllBEs("VTabletWriterV2._init.is_high_priority") } diff --git a/regression-test/suites/fault_injection_p0/test_memtable_flush_is_high_priority_for_vtablet_writerv1_fault_injection.groovy b/regression-test/suites/fault_injection_p0/test_memtable_flush_is_high_priority_for_vtablet_writerv1_fault_injection.groovy index b9fd8ecf5517001..d1e18ed0a63e6d6 100644 --- a/regression-test/suites/fault_injection_p0/test_memtable_flush_is_high_priority_for_vtablet_writerv1_fault_injection.groovy +++ b/regression-test/suites/fault_injection_p0/test_memtable_flush_is_high_priority_for_vtablet_writerv1_fault_injection.groovy @@ -58,7 +58,7 @@ suite("test_memtable_flush_is_high_priority_for_vtablet_writerV1", "nonConcurren qt_sql """select * from ${testTable} order by id""" sql """set enable_memtable_on_sink_node = ${original_status}""" } catch(Exception e) { - log.error(e.message()) + log.error(e.getMessage()) } finally { GetDebugPoint().disableDebugPointForAllBEs("VTabletWriter._init.is_high_priority") } diff --git a/regression-test/suites/inject_hdfs_vault_p0/inject_hdfs_load_error.groovy b/regression-test/suites/inject_hdfs_vault_p0/inject_hdfs_load_error.groovy index 407f914f96466e8..cc24c06e4093c8a 100755 --- a/regression-test/suites/inject_hdfs_vault_p0/inject_hdfs_load_error.groovy +++ b/regression-test/suites/inject_hdfs_vault_p0/inject_hdfs_load_error.groovy @@ -18,6 +18,10 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("inject_hdfs_load_error", "nonConcurrent") { + if (!isCloudMode()) { + logger.info("skip create storgage vault case") + return + } if (!enableStoragevault()) { logger.info("skip create storgage vault case") return diff --git a/regression-test/suites/inject_hdfs_vault_p0/inject_hdfs_select_error.groovy b/regression-test/suites/inject_hdfs_vault_p0/inject_hdfs_select_error.groovy index e49a9a242b2d9cb..c993df538d0b6bc 100755 
--- a/regression-test/suites/inject_hdfs_vault_p0/inject_hdfs_select_error.groovy +++ b/regression-test/suites/inject_hdfs_vault_p0/inject_hdfs_select_error.groovy @@ -18,6 +18,10 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("inject_hdfs_load_error", "nonConcurrent") { + if (!isCloudMode()) { + logger.info("skip create storgage vault case") + return + } if (!enableStoragevault()) { logger.info("skip create storgage vault case") return diff --git a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_with_multi_index_segments.groovy b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_with_multi_index_segments.groovy index 5d9c53ccfb23d79..6b8b64ec11e1537 100644 --- a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_with_multi_index_segments.groovy +++ b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_with_multi_index_segments.groovy @@ -288,7 +288,7 @@ suite("test_index_compaction_with_multi_index_segments", "nonConcurrent") { assert (rowsetCount == 3 * replicaNum) // trigger full compactions for all tablets in ${tableName} - trigger_and_wait_compaction.call(tableName, "full") + trigger_and_wait_compaction(tableName, "full") // after full compaction, there is only 1 rowset. 
rowsetCount = get_rowset_count.call(tablets) if (isCloudMode) { diff --git a/regression-test/suites/inverted_index_p0/test_inverted_index_file_size.groovy b/regression-test/suites/inverted_index_p0/test_inverted_index_file_size.groovy index 73446089bb209ce..b0fee7cc5579bc4 100644 --- a/regression-test/suites/inverted_index_p0/test_inverted_index_file_size.groovy +++ b/regression-test/suites/inverted_index_p0/test_inverted_index_file_size.groovy @@ -87,7 +87,7 @@ suite("test_inverted_index_file_size", "nonConcurrent"){ qt_sql """ select count() from ${tableName} where clientip match '17.0.0.0' and request match 'GET' and status match '200' and size > 200 """ qt_sql """ select count() from ${tableName} where clientip match_phrase '17.0.0.0' and request match_phrase 'GET' and status match '200' and size > 200 """ - trigger_and_wait_compaction.call(tableName, "full") + trigger_and_wait_compaction(tableName, "full") qt_sql """ select count() from ${tableName} where clientip match '17.0.0.0' and request match 'GET' and status match '200' and size > 200 """ qt_sql """ select count() from ${tableName} where clientip match_phrase '17.0.0.0' and request match_phrase 'GET' and status match '200' and size > 200 """ diff --git a/regression-test/suites/inverted_index_p0/test_inverted_index_v2_file_size.groovy b/regression-test/suites/inverted_index_p0/test_inverted_index_v2_file_size.groovy index e9ab66bbad40b13..76017c8bb305656 100644 --- a/regression-test/suites/inverted_index_p0/test_inverted_index_v2_file_size.groovy +++ b/regression-test/suites/inverted_index_p0/test_inverted_index_v2_file_size.groovy @@ -100,7 +100,7 @@ suite("test_index_index_V2_file_size", "nonConcurrent") { qt_sql """ select * from ${tableName} where score < 100 order by id, name, hobbies, score """ // trigger full compactions for all tablets in ${tableName} - trigger_and_wait_compaction.call(tableName, "full") + trigger_and_wait_compaction(tableName, "full") def dedup_tablets = 
deduplicate_tablets(tablets) @@ -135,7 +135,7 @@ suite("test_index_index_V2_file_size", "nonConcurrent") { set_be_config.call("inverted_index_compaction_enable", "false") // trigger full compactions for all tablets in ${tableName} - trigger_and_wait_compaction.call(tableName, "full") + trigger_and_wait_compaction(tableName, "full") // after full compaction, there is only 1 rowset. count = get_rowset_count.call(tablets); diff --git a/regression-test/suites/load_p0/routine_load/test_routine_load_with_user.groovy b/regression-test/suites/load_p0/routine_load/test_routine_load_with_user.groovy index 7b01dbacc6fa2d2..73cce57822fab3f 100644 --- a/regression-test/suites/load_p0/routine_load/test_routine_load_with_user.groovy +++ b/regression-test/suites/load_p0/routine_load/test_routine_load_with_user.groovy @@ -66,6 +66,19 @@ suite("test_routine_load_with_user","p0") { assertTrue(!clusters.isEmpty()) def validCluster = clusters[0][0] sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + + try { + def storageVaults = (sql " SHOW STORAGE VAULT; ").stream().map(row -> row[0]).collect(Collectors.toSet()) + logger.info("all vaults: ${storageVaults}") + for (String vault in storageVaults) { + sql """ + GRANT usage_priv ON storage vault ${vault} TO '${user}'; + """ + } + } catch (Exception e) { + // cloud instance may doesn't support storage vault + logger.info(e.getMessage()) + } } connect(user, "${pwd}", context.config.jdbcUrl) { diff --git a/regression-test/suites/node_p0/test_frontend.groovy b/regression-test/suites/node_p0/test_frontend.groovy index 29ee3e06315a303..57d1445e63db319 100644 --- a/regression-test/suites/node_p0/test_frontend.groovy +++ b/regression-test/suites/node_p0/test_frontend.groovy @@ -22,7 +22,7 @@ suite("test_frontend", "nonConcurrent") { def res2 = sql """SHOW FRONTENDS Disks""" assertTrue(res2.size() != 0) - if (Config.isCloudMode()) { + if (isCloudMode()) { // In the test_sql_mode_node_mgr regression case, there is already a similar 
and more complex case. This case is redundant. Additionally, there is a 5-minute limit for dropping FE on the cloud. // so ignore it in cloud return; diff --git a/regression-test/suites/schema_change_p0/modify_col_type_agg/schema_change_modify_mv_column_type_agg.groovy b/regression-test/suites/schema_change_p0/modify_col_type_agg/schema_change_modify_mv_column_type_agg.groovy index c1613c696251a50..dd5cc5c47ee138f 100644 --- a/regression-test/suites/schema_change_p0/modify_col_type_agg/schema_change_modify_mv_column_type_agg.groovy +++ b/regression-test/suites/schema_change_p0/modify_col_type_agg/schema_change_modify_mv_column_type_agg.groovy @@ -92,6 +92,6 @@ suite("schema_change_modify_mv_column_type_agg") { } } } - qt_sql """ desc ${testTable} all """ + qt_master_sql """ desc ${testTable} all """ sql "INSERT INTO ${testTable} SELECT * from ${testTable}" } diff --git a/regression-test/suites/schema_change_p0/modify_col_type_dup/schema_change_modify_mv_column_type.groovy b/regression-test/suites/schema_change_p0/modify_col_type_dup/schema_change_modify_mv_column_type.groovy index 3ea57fefff573e1..92c3870c56339be 100644 --- a/regression-test/suites/schema_change_p0/modify_col_type_dup/schema_change_modify_mv_column_type.groovy +++ b/regression-test/suites/schema_change_p0/modify_col_type_dup/schema_change_modify_mv_column_type.groovy @@ -95,6 +95,6 @@ suite("schema_change_modify_mv_column_type") { } // sync materialized view rewrite will fail when schema change, tmp disable, enable when fixed sql """set enable_dml_materialized_view_rewrite = false;""" - qt_sql """ desc ${testTable} all """ + qt_master_sql """ desc ${testTable} all """ sql "INSERT INTO ${testTable} SELECT * from ${testTable}" } diff --git a/regression-test/suites/schema_change_p0/modify_col_type_dup2/schema_change_modify_mv_column_type2.groovy b/regression-test/suites/schema_change_p0/modify_col_type_dup2/schema_change_modify_mv_column_type2.groovy index c2f7caa1af4f5fd..2f475992b494372 100644 --- 
a/regression-test/suites/schema_change_p0/modify_col_type_dup2/schema_change_modify_mv_column_type2.groovy +++ b/regression-test/suites/schema_change_p0/modify_col_type_dup2/schema_change_modify_mv_column_type2.groovy @@ -69,7 +69,7 @@ suite("schema_change_modify_mv_column_type2") { } } createMV ("""CREATE MATERIALIZED VIEW mv_${testTable}_2 AS SELECT k1, sum(c_int), max(c_int), min(c_int) FROM ${testTable} GROUP BY k1""") - qt_sql """ desc ${testTable} all """ + qt_master_sql """ desc ${testTable} all """ sql "set topn_opt_limit_threshold = 100" qt_sql "SELECT * from ${testTable} order by 1, 2, 3 limit 10" qt_sql "SELECT * from ${testTable} where c_tinyint = 10 order by 1, 2, 3 limit 10 " diff --git a/regression-test/suites/show_p0/test_show_tablet.groovy b/regression-test/suites/show_p0/test_show_tablet.groovy index 59e7c1a8e4da2f2..024e90fdd198bf0 100644 --- a/regression-test/suites/show_p0/test_show_tablet.groovy +++ b/regression-test/suites/show_p0/test_show_tablet.groovy @@ -25,14 +25,32 @@ suite("test_show_tablet") { PROPERTIES ( "replication_num" = "1" );""" - def res = sql """SHOW TABLETS FROM show_tablets_test_t limit 5, 1;""" - logger.info("result: " + res.toString()); - assertTrue(res.size() == 0) + def res = sql """ SHOW TABLETS FROM show_tablets_test_t """ + if (res.size() == 5) { + // replication num == 1 + res = sql """SHOW TABLETS FROM show_tablets_test_t limit 5, 1;""" + logger.info("result: " + res.toString()); + assertTrue(res.size() == 0) - res = sql """SHOW TABLETS FROM show_tablets_test_t limit 3, 5;""" - assertTrue(res.size() == 2) + res = sql """SHOW TABLETS FROM show_tablets_test_t limit 3, 5;""" + assertTrue(res.size() == 2) - res = sql """SHOW TABLETS FROM show_tablets_test_t limit 10;""" - assertTrue(res.size() == 5) + res = sql """SHOW TABLETS FROM show_tablets_test_t limit 10;""" + assertTrue(res.size() == 5) + } else if (res.size() == 15) { + // in multi-be cluster and force_olap_table_replication_num=3 + // will change to 3 
replication even though set "replication_num" = "1" in create table + res = sql """SHOW TABLETS FROM show_tablets_test_t limit 15, 1;""" + logger.info("result: " + res.toString()); + assertTrue(res.size() == 0) + + res = sql """SHOW TABLETS FROM show_tablets_test_t limit 13, 5;""" + assertTrue(res.size() == 2) + + res = sql """SHOW TABLETS FROM show_tablets_test_t limit 15;""" + assertTrue(res.size() == 15) + } else { + assertTrue(1 == 2) + } } \ No newline at end of file diff --git a/regression-test/suites/ssb_sf0.1_p1/load.groovy b/regression-test/suites/ssb_sf0.1_p1/load.groovy index 3b3955dc3ac7e63..240bd8a1396ed09 100644 --- a/regression-test/suites/ssb_sf0.1_p1/load.groovy +++ b/regression-test/suites/ssb_sf0.1_p1/load.groovy @@ -89,7 +89,7 @@ suite("load") { sql new File("""${context.file.parent}/ddl/${table}_delete.sql""").text sql "set insert_timeout=3600" def r = sql "select @@insert_timeout" - year_cons = [ + def year_cons = [ 'lo_orderdate<19930101', 'lo_orderdate>=19930101 and lo_orderdate<19940101', 'lo_orderdate>=19940101 and lo_orderdate<19950101',