diff --git a/regression-test/plugins/cloud_compaction_plugin.groovy b/regression-test/plugins/cloud_compaction_plugin.groovy index 8381fd28a8eefe..2bc2c61ce83d69 100644 --- a/regression-test/plugins/cloud_compaction_plugin.groovy +++ b/regression-test/plugins/cloud_compaction_plugin.groovy @@ -75,6 +75,9 @@ Suite.metaClass.doCloudCompaction = { String tableName /* param */ -> //assertEquals("success", compactJson.status.toLowerCase()) } + // waiting compaction to start + Thread.sleep(10000) + // wait for all compactions done for (String[] tablet in tablets) { boolean running = true diff --git a/regression-test/plugins/plugin_compaction.groovy b/regression-test/plugins/plugin_compaction.groovy new file mode 100644 index 00000000000000..1b79bccd13f143 --- /dev/null +++ b/regression-test/plugins/plugin_compaction.groovy @@ -0,0 +1,91 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements.  See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.  The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License.  You may obtain a copy of the License at +// +//   http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied.  See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.codehaus.groovy.runtime.IOGroovyMethods +import org.awaitility.Awaitility +import org.apache.doris.regression.suite.Suite +import java.util.concurrent.TimeUnit +import static java.util.concurrent.TimeUnit.SECONDS + +/* +* Callers must make sure compactions are triggered first and sleep a short time (about 10 seconds) so the compactions can start before these helpers check their status. +*/ +Suite.metaClass.checkCompactionStatus = { String backendIP, String backendPort, String tabletID -> + def (code, out, err) = be_get_compaction_status(backendIP, backendPort, tabletID) + logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) + if (code != 0) { + return false + } + + def compactionStatus = parseJson(out.trim()) + if ("success" == compactionStatus.status.toLowerCase()) { + // run_status == true means compactions are running + // run_status == false means compactions are finished + return !compactionStatus.run_status + } else { + return false + } +} + +Suite.metaClass.assertCompactionStatus = { String backendIP, String backendPort, String tabletID -> + Awaitility.await().atMost(30, TimeUnit.SECONDS).pollInterval(1, SECONDS).untilAsserted({ + assert checkCompactionStatus(backendIP, backendPort, tabletID) + }) } + +Suite.metaClass.assertCompactionStatusAtMost = { String backendIP, String backendPort, String tabletID, long t, TimeUnit tu -> + Awaitility.await().atMost(t, tu).pollInterval(1, SECONDS).untilAsserted({ + assert checkCompactionStatus(backendIP, backendPort, tabletID) + }) } + +// trigger cumulative compaction on all tablets of the table and wait for it to finish. +// TODO: handle tables with auto compaction enabled. +Suite.metaClass.doCompactionWaitDone = { String tableName, boolean disableAutoCompaction = false -> + // get table tablets.
+ def tablets = sql_return_maparray """ show tablets from ${tableName}; """ + + def backendId_to_backendIP = [:] + def backendId_to_backendHttpPort = [:] + getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); + + // trigger compactions for all tablets in ${tableName} + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + def backend_id = tablet.BackendId + def (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) + logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) + assertEquals(code, 0) + def compactJson = parseJson(out.trim()) + if (compactJson.status.toLowerCase() == "fail") { + assertEquals(disableAutoCompaction, false) + logger.info("Compaction was done automatically!") + } + if (disableAutoCompaction) { + assertEquals("success", compactJson.status.toLowerCase()) + } + } + + // waiting compaction to start + Thread.sleep(10000) + + // wait for all compactions done + for (def tablet in tablets) { + assertCompactionStatus(backendId_to_backendIP.get(tablet.BackendId), backendId_to_backendHttpPort.get(tablet.BackendId), tablet.TabletId) + } +} + diff --git a/regression-test/suites/cloud_p0/cache/compaction/test_stale_rowset.groovy b/regression-test/suites/cloud_p0/cache/compaction/test_stale_rowset.groovy index 8975d92f2ee893..7eff993db6dd41 100644 --- a/regression-test/suites/cloud_p0/cache/compaction/test_stale_rowset.groovy +++ b/regression-test/suites/cloud_p0/cache/compaction/test_stale_rowset.groovy @@ -189,6 +189,9 @@ suite("test_stale_rowset") { } } + // waiting compaction to start + Thread.sleep(10000) + // wait for all compactions done for (String[] tablet in tablets) { boolean running = true diff --git a/regression-test/suites/compaction/compaction_width_array_column.groovy b/regression-test/suites/compaction/compaction_width_array_column.groovy index 4e3fed354c7d84..e20fb727a48a0a 100644 --- a/regression-test/suites/compaction/compaction_width_array_column.groovy +++ b/regression-test/suites/compaction/compaction_width_array_column.groovy @@ -98,6 +98,9 @@ suite('compaction_width_array_column', "p2") { assertEquals("success", compactJson.status.toLowerCase()) } + // waiting compaction to start + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { boolean running = true diff --git a/regression-test/suites/compaction/test_base_compaction.groovy b/regression-test/suites/compaction/test_base_compaction.groovy index 83f6b44e611acd..4a27488072dc78 100644 --- a/regression-test/suites/compaction/test_base_compaction.groovy +++ b/regression-test/suites/compaction/test_base_compaction.groovy @@ -146,18 +146,12 @@ suite("test_base_compaction", "p2") { assertEquals("success", compactJson.status.toLowerCase()) } + // wait compaction to start + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); + 
assertCompactionStatus(backendId_to_backendIP.get(tablet.BackendId), backendId_to_backendHttpPort.get(tablet.BackendId), tablet.TabletId) } streamLoad { @@ -206,18 +200,12 @@ suite("test_base_compaction", "p2") { assertEquals("success", compactJson.status.toLowerCase()) } + // wait compaction to start + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); + assertCompactionStatus(backendId_to_backendIP.get(tablet.BackendId), backendId_to_backendHttpPort.get(tablet.BackendId), tablet.TabletId) } qt_select_default """ SELECT count(*) FROM ${tableName} """ @@ -235,18 +223,12 @@ suite("test_base_compaction", "p2") { assertEquals("success", compactJson.status.toLowerCase()) } + // wait compaction to start + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); + assertCompactionStatus(backendId_to_backendIP.get(tablet.BackendId), backendId_to_backendHttpPort.get(tablet.BackendId), tablet.TabletId) } def replicaNum = get_table_replica_num(tableName) diff --git a/regression-test/suites/compaction/test_base_compaction_no_value.groovy b/regression-test/suites/compaction/test_base_compaction_no_value.groovy index 1ed30459521581..7fb812cf79e415 100644 --- a/regression-test/suites/compaction/test_base_compaction_no_value.groovy +++ b/regression-test/suites/compaction/test_base_compaction_no_value.groovy @@ -16,7 +16,6 @@ // under the License. 
import org.codehaus.groovy.runtime.IOGroovyMethods -import org.awaitility.Awaitility suite("test_base_compaction_no_value", "p2") { def tableName = "base_compaction_uniq_keys_no_value" @@ -146,18 +145,12 @@ suite("test_base_compaction_no_value", "p2") { assertEquals("success", compactJson.status.toLowerCase()) } + // wait compaction to start + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); + assertCompactionStatus(backendId_to_backendIP.get(tablet.BackendId), backendId_to_backendHttpPort.get(tablet.BackendId), tablet.TabletId) } streamLoad { @@ -206,18 +199,12 @@ suite("test_base_compaction_no_value", "p2") { assertEquals("success", compactJson.status.toLowerCase()) } + // wait compaction to start + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); + assertCompactionStatus(backendId_to_backendIP.get(tablet.BackendId), backendId_to_backendHttpPort.get(tablet.BackendId), tablet.TabletId) } qt_select_default """ SELECT count(*) FROM ${tableName} """ @@ -234,19 +221,13 @@ suite("test_base_compaction_no_value", "p2") { def compactJson = parseJson(out.trim()) assertEquals("success", compactJson.status.toLowerCase()) } + + // wait compaction to start + Thread.sleep(10000) // wait for all compactions done for (def tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); + assertCompactionStatus(backendId_to_backendIP.get(tablet.BackendId), backendId_to_backendHttpPort.get(tablet.BackendId), tablet.TabletId) } def replicaNum = get_table_replica_num(tableName) diff --git a/regression-test/suites/compaction/test_compacation_with_delete.groovy b/regression-test/suites/compaction/test_compacation_with_delete.groovy index e41f787c52276e..da09bd8c192a57 100644 --- a/regression-test/suites/compaction/test_compacation_with_delete.groovy +++ b/regression-test/suites/compaction/test_compacation_with_delete.groovy @@ -114,6 +114,9 @@ suite("test_compaction_with_delete") { assertEquals("success", compactJson.status.toLowerCase()) } } + + 
// wait compaction to start + Thread.sleep(10000) // wait for all compactions done for (def tablet in tablets) { diff --git a/regression-test/suites/compaction/test_compaction_agg_keys.groovy b/regression-test/suites/compaction/test_compaction_agg_keys.groovy index 50c79eb7a2e50d..d718509e89fea9 100644 --- a/regression-test/suites/compaction/test_compaction_agg_keys.groovy +++ b/regression-test/suites/compaction/test_compaction_agg_keys.groovy @@ -121,20 +121,12 @@ suite("test_compaction_agg_keys") { } } + // wait compaction to start + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - - }); + assertCompactionStatus(backendId_to_backendIP.get(tablet.BackendId), backendId_to_backendHttpPort.get(tablet.BackendId), tablet.TabletId) } def replicaNum = get_table_replica_num(tableName) diff --git a/regression-test/suites/compaction/test_compaction_agg_keys_with_array_map.groovy b/regression-test/suites/compaction/test_compaction_agg_keys_with_array_map.groovy index 1556d2f00a506f..fa635d9c72330e 100644 --- a/regression-test/suites/compaction/test_compaction_agg_keys_with_array_map.groovy +++ b/regression-test/suites/compaction/test_compaction_agg_keys_with_array_map.groovy @@ -112,6 +112,8 @@ suite("test_compaction_agg_keys_with_array_map") { } } + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { boolean running = true diff --git a/regression-test/suites/compaction/test_compaction_agg_keys_with_delete.groovy b/regression-test/suites/compaction/test_compaction_agg_keys_with_delete.groovy index 1610587602d575..dc056fa53adfc3 100644 --- a/regression-test/suites/compaction/test_compaction_agg_keys_with_delete.groovy +++ b/regression-test/suites/compaction/test_compaction_agg_keys_with_delete.groovy @@ -131,6 +131,8 @@ suite("test_compaction_agg_keys_with_delete") { } } + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { boolean running = true diff --git a/regression-test/suites/compaction/test_compaction_dup_keys.groovy b/regression-test/suites/compaction/test_compaction_dup_keys.groovy index 458185ba8069ac..c73b9ef6dbe227 100644 --- a/regression-test/suites/compaction/test_compaction_dup_keys.groovy +++ b/regression-test/suites/compaction/test_compaction_dup_keys.groovy @@ -120,6 +120,8 @@ suite("test_compaction_dup_keys") { } } + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { boolean running = true diff --git a/regression-test/suites/compaction/test_compaction_dup_keys_with_delete.groovy b/regression-test/suites/compaction/test_compaction_dup_keys_with_delete.groovy index 2e34086172a553..12995b710ccece 100644 --- a/regression-test/suites/compaction/test_compaction_dup_keys_with_delete.groovy +++ b/regression-test/suites/compaction/test_compaction_dup_keys_with_delete.groovy @@ -132,6 +132,8 @@ suite("test_compaction_dup_keys_with_delete") { } } + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { boolean running = 
true diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys.groovy index 21fd16403ac79f..04416131173be5 100644 --- a/regression-test/suites/compaction/test_compaction_uniq_keys.groovy +++ b/regression-test/suites/compaction/test_compaction_uniq_keys.groovy @@ -120,18 +120,12 @@ suite("test_compaction_uniq_keys") { } } + // wait compaction to start + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); + assertCompactionStatus(backendId_to_backendIP.get(tablet.BackendId), backendId_to_backendHttpPort.get(tablet.BackendId), tablet.TabletId) } def replicaNum = get_table_replica_num(tableName) diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys_ck.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys_ck.groovy index b65557b059c800..76d23ce4d5e395 100644 --- a/regression-test/suites/compaction/test_compaction_uniq_keys_ck.groovy +++ b/regression-test/suites/compaction/test_compaction_uniq_keys_ck.groovy @@ -124,6 +124,8 @@ suite("test_compaction_uniq_keys_ck") { } } + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { boolean running = true diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys_row_store.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys_row_store.groovy index 82b8f2b8d5a06b..bde5e2ec5c8721 100644 --- a/regression-test/suites/compaction/test_compaction_uniq_keys_row_store.groovy +++ b/regression-test/suites/compaction/test_compaction_uniq_keys_row_store.groovy @@ -201,6 +201,8 @@ suite("test_compaction_uniq_keys_row_store", "p0") { } } + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { boolean running = true diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys_row_store_ck.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys_row_store_ck.groovy index 5145e810c1b020..4da21f55444a09 100644 --- a/regression-test/suites/compaction/test_compaction_uniq_keys_row_store_ck.groovy +++ b/regression-test/suites/compaction/test_compaction_uniq_keys_row_store_ck.groovy @@ -203,6 +203,8 @@ suite("test_compaction_uniq_keys_row_store_ck", "p0") { } } + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { boolean running = true diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete.groovy index 18a46422d3292b..3ea2d75674827a 100644 --- a/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete.groovy +++ b/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete.groovy @@ -135,6 +135,8 @@ suite("test_compaction_uniq_keys_with_delete") { } } + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { boolean running = true diff --git 
a/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete_ck.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete_ck.groovy index 21af1a9220788c..6ab33ac9b7c1fb 100644 --- a/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete_ck.groovy +++ b/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete_ck.groovy @@ -152,6 +152,8 @@ suite("test_compaction_uniq_keys_with_delete_ck") { } } + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { boolean running = true diff --git a/regression-test/suites/compaction/test_compaction_with_visible_version.groovy b/regression-test/suites/compaction/test_compaction_with_visible_version.groovy index ddf80e096425c7..049a280da21914 100644 --- a/regression-test/suites/compaction/test_compaction_with_visible_version.groovy +++ b/regression-test/suites/compaction/test_compaction_with_visible_version.groovy @@ -108,6 +108,8 @@ suite('test_compaction_with_visible_version', 'docker') { triggerCompaction it, isCumuCompactSucc, 'cumulative' } + Thread.sleep(10000) + if (isCumuCompactSucc) { // wait compaction done def startTs = System.currentTimeMillis() diff --git a/regression-test/suites/compaction/test_full_compaction.groovy b/regression-test/suites/compaction/test_full_compaction.groovy index b54f92747e6c4e..27847cfe231dd2 100644 --- a/regression-test/suites/compaction/test_full_compaction.groovy +++ b/regression-test/suites/compaction/test_full_compaction.groovy @@ -139,11 +139,12 @@ suite("test_full_compaction") { } } + Thread.sleep(10000) + // wait for full compaction done for (def tablet in tablets) { boolean running = true do { - Thread.sleep(1000) String tablet_id = tablet.TabletId backend_id = tablet.BackendId (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) diff --git a/regression-test/suites/compaction/test_full_compaction_by_table_id.groovy b/regression-test/suites/compaction/test_full_compaction_by_table_id.groovy index 222c7e7138865b..7dba951bba0652 100644 --- a/regression-test/suites/compaction/test_full_compaction_by_table_id.groovy +++ b/regression-test/suites/compaction/test_full_compaction_by_table_id.groovy @@ -147,6 +147,8 @@ suite("test_full_compaction_by_table_id") { } } + Thread.sleep(10000) + // wait for full compaction done { for (def tablet : tablets) { diff --git a/regression-test/suites/compaction/test_full_compaction_ck.groovy b/regression-test/suites/compaction/test_full_compaction_ck.groovy index ae6b467acafcb9..9f6f39bfd1a95a 100644 --- a/regression-test/suites/compaction/test_full_compaction_ck.groovy +++ b/regression-test/suites/compaction/test_full_compaction_ck.groovy @@ -140,6 +140,8 @@ suite("test_full_compaction_ck") { } } + Thread.sleep(10000) + // wait for full compaction done for (def tablet in tablets) { boolean running = true diff --git a/regression-test/suites/compaction/test_vertical_compaction_agg_keys.groovy b/regression-test/suites/compaction/test_vertical_compaction_agg_keys.groovy index 8a661a00c13df1..6782d091d1d04b 100644 --- a/regression-test/suites/compaction/test_vertical_compaction_agg_keys.groovy +++ b/regression-test/suites/compaction/test_vertical_compaction_agg_keys.groovy @@ -132,6 +132,8 @@ suite("test_vertical_compaction_agg_keys") { } } + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { boolean running = true diff --git 
a/regression-test/suites/compaction/test_vertical_compaction_agg_state.groovy b/regression-test/suites/compaction/test_vertical_compaction_agg_state.groovy index 40a86812fd265b..fb9168feb5884d 100644 --- a/regression-test/suites/compaction/test_vertical_compaction_agg_state.groovy +++ b/regression-test/suites/compaction/test_vertical_compaction_agg_state.groovy @@ -93,6 +93,8 @@ suite("test_vertical_compaction_agg_state") { } } + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { boolean running = true diff --git a/regression-test/suites/delete_p0/test_delete_sign_with_cumu_compaction.groovy b/regression-test/suites/delete_p0/test_delete_sign_with_cumu_compaction.groovy index eca0ed41128cc8..8f9ddb68101bfc 100644 --- a/regression-test/suites/delete_p0/test_delete_sign_with_cumu_compaction.groovy +++ b/regression-test/suites/delete_p0/test_delete_sign_with_cumu_compaction.groovy @@ -68,7 +68,7 @@ suite('test_delete_sign_with_cumu_compaction') { def waitForCompaction = { be_host, be_http_port -> // wait for all compactions done - Awaitility.await().atMost(30, SECONDS).pollInterval(1, SECONDS).until { + Awaitility.await().atMost(30, SECONDS).pollDelay(10, TimeUnit.SECONDS).pollInterval(1, SECONDS).untilAsserted { String tablet_id = tablet[0] StringBuilder sb = new StringBuilder(); sb.append("curl -X GET http://${be_host}:${be_http_port}") @@ -85,7 +85,7 @@ suite('test_delete_sign_with_cumu_compaction') { def compactionStatus = parseJson(out.trim()) assertEquals("success", compactionStatus.status.toLowerCase()) - !compactionStatus.run_status + assert !compactionStatus.run_status } } diff --git a/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_stale_resp_load_compaction_conflict.groovy b/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_stale_resp_load_compaction_conflict.groovy index 8f4fa45700b81f..a769730f576f4a 100644 --- a/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_stale_resp_load_compaction_conflict.groovy +++ b/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_stale_resp_load_compaction_conflict.groovy @@ -92,7 +92,7 @@ suite("test_cloud_mow_stale_resp_load_compaction_conflict", "nonConcurrent") { Assert.assertEquals("success", compactJson.status.toLowerCase()) // wait for full compaction to complete - Awaitility.await().atMost(3, TimeUnit.SECONDS).pollDelay(200, TimeUnit.MILLISECONDS).pollInterval(100, TimeUnit.MILLISECONDS).until( + Awaitility.await().atMost(30, TimeUnit.SECONDS).pollDelay(10, TimeUnit.SECONDS).pollInterval(1, TimeUnit.SECONDS).until( { (code, out, err) = be_get_compaction_status(tabletBackend.Host, tabletBackend.HttpPort, tabletId) logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) diff --git a/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_compaction_with_higher_version.groovy b/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_compaction_with_higher_version.groovy index b5ae14957a706f..34ff11f28755b3 100644 --- a/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_compaction_with_higher_version.groovy +++ b/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_compaction_with_higher_version.groovy @@ -172,7 +172,7 @@ suite("test_partial_update_compaction_with_higher_version", "nonConcurrent") { Assert.assertEquals("success", compactJson.status.toLowerCase()) // wait for full compaction to complete - Awaitility.await().atMost(3, 
TimeUnit.SECONDS).pollDelay(200, TimeUnit.MILLISECONDS).pollInterval(100, TimeUnit.MILLISECONDS).until( + Awaitility.await().atMost(30, TimeUnit.SECONDS).pollDelay(10, TimeUnit.SECONDS).pollInterval(1, TimeUnit.SECONDS).until( { (code, out, err) = be_get_compaction_status(tabletBackend.Host, tabletBackend.HttpPort, tabletId) logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) diff --git a/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_conflict_skip_compaction.groovy b/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_conflict_skip_compaction.groovy index 08eba337af3327..3974918a4de08a 100644 --- a/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_conflict_skip_compaction.groovy +++ b/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_conflict_skip_compaction.groovy @@ -155,7 +155,7 @@ suite("test_partial_update_conflict_skip_compaction", "nonConcurrent") { Assert.assertEquals("success", compactJson.status.toLowerCase()) // wait for full compaction to complete - Awaitility.await().atMost(3, TimeUnit.SECONDS).pollDelay(200, TimeUnit.MILLISECONDS).pollInterval(100, TimeUnit.MILLISECONDS).until( + Awaitility.await().atMost(30, TimeUnit.SECONDS).pollDelay(10, TimeUnit.SECONDS).pollInterval(1, TimeUnit.SECONDS).until( { (code, out, err) = be_get_compaction_status(tabletBackend.Host, tabletBackend.HttpPort, tabletId) logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) diff --git a/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_skip_compaction.groovy b/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_skip_compaction.groovy index d816c30f7e9bd8..54589864d77b4c 100644 --- a/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_skip_compaction.groovy +++ b/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_skip_compaction.groovy @@ -144,7 +144,7 @@ suite("test_partial_update_skip_compaction", "nonConcurrent") { Assert.assertEquals("success", compactJson.status.toLowerCase()) // wait for full compaction to complete - Awaitility.await().atMost(3, TimeUnit.SECONDS).pollDelay(200, TimeUnit.MILLISECONDS).pollInterval(100, TimeUnit.MILLISECONDS).until( + Awaitility.await().atMost(30, TimeUnit.SECONDS).pollDelay(10, TimeUnit.SECONDS).pollInterval(1, TimeUnit.SECONDS).until( { (code, out, err) = be_get_compaction_status(tabletBackend.Host, tabletBackend.HttpPort, tabletId) logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) diff --git a/regression-test/suites/fault_injection_p0/test_variant_bloom_filter.groovy b/regression-test/suites/fault_injection_p0/test_variant_bloom_filter.groovy index 88c529d685dfe8..9140f04e4613a8 100644 --- a/regression-test/suites/fault_injection_p0/test_variant_bloom_filter.groovy +++ b/regression-test/suites/fault_injection_p0/test_variant_bloom_filter.groovy @@ -98,18 +98,12 @@ suite("test_variant_bloom_filter", "nonConcurrent") { assertEquals("success", compactJson.status.toLowerCase()) } + // wait compaction to start + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { - Awaitility.await().atMost(3, TimeUnit.MINUTES).untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("compaction task for this tablet is not running", compactionStatus.msg.toLowerCase()) - return compactionStatus.run_status; - }); + assertCompactionStatusAtMost(backendId_to_backendIP.get(tablet.BackendId), backendId_to_backendHttpPort.get(tablet.BackendId), tablet.TabletId, 3, TimeUnit.MINUTES) } for (def tablet in tablets) { diff --git a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_empty_segments.groovy b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_empty_segments.groovy index 1c70c9e8e5027b..78022e0a846117 100644 --- a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_empty_segments.groovy +++ b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_empty_segments.groovy @@ -68,18 +68,12 @@ suite("test_index_compaction_empty_segments", "p0, nonConcurrent") { assertEquals("success", compactJson.status.toLowerCase()) } + // wait compaction to start + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { - Awaitility.await().atMost(10, TimeUnit.MINUTES).untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("compaction task for this tablet is not running", compactionStatus.msg.toLowerCase()) - return compactionStatus.run_status; - }); + assertCompactionStatusAtMost(backendId_to_backendIP.get(tablet.BackendId), backendId_to_backendHttpPort.get(tablet.BackendId), tablet.TabletId, 10, TimeUnit.MINUTES) } diff --git a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_p0.groovy b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_p0.groovy index e178d08baadd75..aefd2cc95edf6d 100644 --- a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_p0.groovy +++ b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_p0.groovy @@ -116,18 +116,12 @@ suite("test_index_compaction_p0", "p0, nonConcurrent") { assertEquals("success", compactJson.status.toLowerCase()) } + // wait compaction to start + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { - Awaitility.await().atMost(1, TimeUnit.MINUTES).untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("compaction task for this tablet is not running", compactionStatus.msg.toLowerCase()) - return compactionStatus.run_status; - }); + assertCompactionStatusAtMost(backendId_to_backendIP.get(tablet.BackendId), backendId_to_backendHttpPort.get(tablet.BackendId), tablet.TabletId, 1, TimeUnit.MINUTES) } diff --git 
a/regression-test/suites/inverted_index_p1/index_compaction/test_index_compaction_p1.groovy b/regression-test/suites/inverted_index_p1/index_compaction/test_index_compaction_p1.groovy index c55ae3c233bec0..7e9ff1ef1507f1 100644 --- a/regression-test/suites/inverted_index_p1/index_compaction/test_index_compaction_p1.groovy +++ b/regression-test/suites/inverted_index_p1/index_compaction/test_index_compaction_p1.groovy @@ -121,18 +121,12 @@ suite("test_index_compaction_p1", "p1, nonConcurrent") { assertEquals("success", compactJson.status.toLowerCase()) } + // wait compaction to start + Thread.sleep(10000) + // wait for all compactions done for (def tablet in tablets) { - Awaitility.await().atMost(10, TimeUnit.MINUTES).untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("compaction task for this tablet is not running", compactionStatus.msg.toLowerCase()) - return compactionStatus.run_status; - }); + assertCompactionStatusAtMost(backendId_to_backendIP.get(tablet.BackendId), backendId_to_backendHttpPort.get(tablet.BackendId), tablet.TabletId, 10, TimeUnit.MINUTES) } for (def tablet in tablets) { diff --git a/regression-test/suites/inverted_index_p1/show_data/test_show_index_data.groovy b/regression-test/suites/inverted_index_p1/show_data/test_show_index_data.groovy index 671fe907782f2c..9754313b8eef8b 100644 --- a/regression-test/suites/inverted_index_p1/show_data/test_show_index_data.groovy +++ b/regression-test/suites/inverted_index_p1/show_data/test_show_index_data.groovy @@ -143,18 +143,11 @@ suite("test_show_index_data", "p1") { assertEquals("success", compactJson.status.toLowerCase()) } + Thread.sleep(30000) + // wait for all compactions done for (def tablet in tablets) { - Awaitility.await().atMost(30, TimeUnit.MINUTES).untilAsserted(() -> { - Thread.sleep(30000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("compaction task for this tablet is not running", compactionStatus.msg.toLowerCase()) - }); + assertCompactionStatusAtMost(backendId_to_backendIP.get(tablet.BackendId), backendId_to_backendHttpPort.get(tablet.BackendId), tablet.TabletId, 30, TimeUnit.MINUTES) } @@ -244,10 +237,10 @@ suite("test_show_index_data", "p1") { } } sql """ alter table ${show_table_name} drop column clientip""" - Awaitility.await().atMost(30, TimeUnit.MINUTES).untilAsserted(() -> { - Thread.sleep(30000) - tablets = sql_return_maparray """ show tablets from ${show_table_name}; """ - for (def tablet in tablets) { + Thread.sleep(30000) + + for (def tablet in tablets) { + Awaitility.await().pollInterval(2, TimeUnit.SECONDS).atMost(30, TimeUnit.MINUTES).untilAsserted({ String tablet_id = tablet.TabletId (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) @@ -263,8 +256,8 @@ suite("test_show_index_data", "p1") { logger.info("rowsetid: " + rowsetid)
assertTrue(!rowsetids.contains(rowsetid)) } - } - }); + }) + } } def build_index = { @@ -292,10 +285,10 @@ suite("test_show_index_data", "p1") { if (!isCloudMode()) { sql """ build index status_idx on ${show_table_name}""" } - Awaitility.await().atMost(30, TimeUnit.MINUTES).untilAsserted(() -> { - Thread.sleep(30000) - tablets = sql_return_maparray """ show tablets from ${show_table_name}; """ - for (def tablet in tablets) { + Thread.sleep(10000) + + for (def tablet in tablets) { + Awaitility.await().pollInterval(2, TimeUnit.SECONDS).atMost(30, TimeUnit.MINUTES).untilAsserted({ String tablet_id = tablet.TabletId (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) @@ -311,8 +304,8 @@ suite("test_show_index_data", "p1") { logger.info("rowsetid: " + rowsetid) assertTrue(!rowsetids.contains(rowsetid)) } - } - }); + }) + } } def drop_index = { @@ -336,10 +329,10 @@ suite("test_show_index_data", "p1") { } } sql """ DROP INDEX status_idx on ${show_table_name}""" - Awaitility.await().atMost(30, TimeUnit.MINUTES).untilAsserted(() -> { - Thread.sleep(30000) - tablets = sql_return_maparray """ show tablets from ${show_table_name}; """ - for (def tablet in tablets) { + Thread.sleep(30000) + + for (def tablet in tablets) { + Awaitility.await().pollInterval(2, TimeUnit.SECONDS).atMost(30, TimeUnit.MINUTES).untilAsserted({ String tablet_id = tablet.TabletId (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) @@ -355,8 +348,8 @@ suite("test_show_index_data", "p1") { logger.info("rowsetid: " + rowsetid) assertTrue(!rowsetids.contains(rowsetid)) } - } - }); + }) + } } // 1. load data diff --git a/regression-test/suites/inverted_index_p2/show_data/test_show_index_data_p2.groovy b/regression-test/suites/inverted_index_p2/show_data/test_show_index_data_p2.groovy index 2839a8a47b04aa..2b3a9abdb2d877 100644 --- a/regression-test/suites/inverted_index_p2/show_data/test_show_index_data_p2.groovy +++ b/regression-test/suites/inverted_index_p2/show_data/test_show_index_data_p2.groovy @@ -243,10 +243,8 @@ suite("test_show_index_data_p2", "p2") { } } sql """ alter table ${show_table_name} drop column clientip""" - Awaitility.await().atMost(60, TimeUnit.MINUTES).untilAsserted(() -> { - Thread.sleep(30000) - tablets = sql_return_maparray """ show tablets from ${show_table_name}; """ - for (def tablet in tablets) { + for (def tablet in tablets) { + Awaitility.await().pollInterval(2, TimeUnit.SECONDS).atMost(30, TimeUnit.MINUTES).untilAsserted({ String tablet_id = tablet.TabletId (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) @@ -262,8 +260,8 @@ suite("test_show_index_data_p2", "p2") { logger.info("rowsetid: " + rowsetid) assertTrue(!rowsetids.contains(rowsetid)) } - } - }); + }) + } } def build_index = { @@ -290,10 +288,10 @@ suite("test_show_index_data_p2", "p2") { if (!isCloudMode()) { sql """ build index status_idx on ${show_table_name}""" } - Awaitility.await().atMost(60, TimeUnit.MINUTES).untilAsserted(() -> { - Thread.sleep(30000) - tablets = sql_return_maparray """ show tablets from ${show_table_name}; """ - for (def tablet in tablets) { + Thread.sleep(30000) + + for (def tablet in tablets) { + Awaitility.await().pollInterval(2, TimeUnit.SECONDS).atMost(30, TimeUnit.MINUTES).untilAsserted({ String tablet_id = tablet.TabletId (code, out, err) = curl("GET",
tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) @@ -309,8 +307,8 @@ suite("test_show_index_data_p2", "p2") { logger.info("rowsetid: " + rowsetid) assertTrue(!rowsetids.contains(rowsetid)) } - } - }); + }) + } } def drop_index = { @@ -334,10 +332,9 @@ suite("test_show_index_data_p2", "p2") { } } sql """ DROP INDEX status_idx on ${show_table_name}""" - Awaitility.await().atMost(60, TimeUnit.MINUTES).untilAsserted(() -> { - Thread.sleep(30000) - tablets = sql_return_maparray """ show tablets from ${show_table_name}; """ - for (def tablet in tablets) { + Thread.sleep(30000) + for (def tablet in tablets) { + Awaitility.await().pollInterval(2, TimeUnit.SECONDS).atMost(30, TimeUnit.MINUTES).untilAsserted({ String tablet_id = tablet.TabletId (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) @@ -353,8 +350,8 @@ suite("test_show_index_data_p2", "p2") { logger.info("rowsetid: " + rowsetid) assertTrue(!rowsetids.contains(rowsetid)) } - } - }); + }) + } } // 1. load data diff --git a/regression-test/suites/schema_change/test_alter_table_column_with_delete_drop_column_dup_key.groovy b/regression-test/suites/schema_change/test_alter_table_column_with_delete_drop_column_dup_key.groovy index 1f9336ae3b3492..bf44c7f2d29c73 100644 --- a/regression-test/suites/schema_change/test_alter_table_column_with_delete_drop_column_dup_key.groovy +++ b/regression-test/suites/schema_change/test_alter_table_column_with_delete_drop_column_dup_key.groovy @@ -50,7 +50,7 @@ suite("test_alter_table_column_with_delete_drop_column_dup_key", "schema_change" """ int max_try_secs = 1200 String res = "NOT_FINISHED" - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { res = getJobState(tbName1) if (res == "FINISHED" || res == "CANCELLED") { assertEquals("FINISHED", res) @@ -66,7 +66,7 @@ suite("test_alter_table_column_with_delete_drop_column_dup_key", "schema_change" ADD COLUMN value3 CHAR(100) DEFAULT 'A'; """ max_try_secs = 1200 - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { res = getJobState(tbName1) if (res == "FINISHED" || res == "CANCELLED") { assertEquals("FINISHED", res) @@ -113,7 +113,7 @@ suite("test_alter_table_column_with_delete_drop_column_dup_key", "schema_change" ALTER TABLE ${tbName1} DROP COLUMN value3; """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { res = getJobState(tbName1) if (res == "FINISHED" || res == "CANCELLED") { assertEquals("FINISHED", res) @@ -128,7 +128,7 @@ suite("test_alter_table_column_with_delete_drop_column_dup_key", "schema_change" ALTER TABLE ${tbName1} ADD COLUMN value3 CHAR(100) DEFAULT 'A'; """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { res = getJobState(tbName1) if
(res == "FINISHED" || res == "CANCELLED") { assertEquals("FINISHED", res) @@ -146,7 +146,7 @@ suite("test_alter_table_column_with_delete_drop_column_dup_key", "schema_change" ALTER TABLE ${tbName1} ADD COLUMN k2 CHAR(10) KEY DEFAULT 'A'; """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { res = getJobState(tbName1) if (res == "FINISHED" || res == "CANCELLED") { assertEquals("FINISHED", res) @@ -190,7 +190,7 @@ suite("test_alter_table_column_with_delete_drop_column_dup_key", "schema_change" ALTER TABLE ${tbName1} DROP COLUMN value3; """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { res = getJobState(tbName1) if (res == "FINISHED" || res == "CANCELLED") { assertEquals("FINISHED", res) @@ -206,7 +206,7 @@ suite("test_alter_table_column_with_delete_drop_column_dup_key", "schema_change" ADD COLUMN value3 CHAR(100) DEFAULT 'A'; """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { res = getJobState(tbName1) if (res == "FINISHED" || res == "CANCELLED") { assertEquals("FINISHED", res) diff --git a/regression-test/suites/schema_change/test_alter_table_column_with_delete_drop_column_unique_key.groovy b/regression-test/suites/schema_change/test_alter_table_column_with_delete_drop_column_unique_key.groovy index 2026b87c367f4c..5b1f4c154668cf 100644 --- a/regression-test/suites/schema_change/test_alter_table_column_with_delete_drop_column_unique_key.groovy +++ b/regression-test/suites/schema_change/test_alter_table_column_with_delete_drop_column_unique_key.groovy @@ -52,7 +52,7 @@ suite("test_alter_table_column_with_delete_drop_column_unique_key", "schema_chan """ int max_try_secs = 1200 String res = "NOT_FINISHED" - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { res = getJobState(tbName1) if (res == "FINISHED" || res == "CANCELLED") { assertEquals("FINISHED", res) @@ -67,7 +67,7 @@ suite("test_alter_table_column_with_delete_drop_column_unique_key", "schema_chan ALTER TABLE ${tbName1} ADD COLUMN value3 CHAR(100) DEFAULT 'A'; """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { res = getJobState(tbName1) if (res == "FINISHED" || res == "CANCELLED") { assertEquals("FINISHED", res) @@ -115,7 +115,7 @@ suite("test_alter_table_column_with_delete_drop_column_unique_key", "schema_chan ALTER TABLE ${tbName1} DROP COLUMN k2; """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { res = getJobState(tbName1) if (res == "FINISHED" || res == "CANCELLED") { 
assertEquals("FINISHED", res) @@ -130,7 +130,7 @@ suite("test_alter_table_column_with_delete_drop_column_unique_key", "schema_chan ALTER TABLE ${tbName1} ADD COLUMN value3 CHAR(100) DEFAULT 'A'; """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { res = getJobState(tbName1) if (res == "FINISHED" || res == "CANCELLED") { assertEquals("FINISHED", res) @@ -148,7 +148,7 @@ suite("test_alter_table_column_with_delete_drop_column_unique_key", "schema_chan ALTER TABLE ${tbName1} ADD COLUMN k2 CHAR(10) KEY DEFAULT 'A'; """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { res = getJobState(tbName1) if (res == "FINISHED" || res == "CANCELLED") { assertEquals("FINISHED", res) diff --git a/regression-test/suites/schema_change_p0/datev2/test_agg_keys_schema_change_datev2.groovy b/regression-test/suites/schema_change_p0/datev2/test_agg_keys_schema_change_datev2.groovy index 8afc4fd6c110ab..f8854b02c1ddc6 100644 --- a/regression-test/suites/schema_change_p0/datev2/test_agg_keys_schema_change_datev2.groovy +++ b/regression-test/suites/schema_change_p0/datev2/test_agg_keys_schema_change_datev2.groovy @@ -49,18 +49,12 @@ suite("test_agg_keys_schema_change_datev2") { logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) } + // wait compaction to start + Thread.sleep(10000) + // wait for all compactions done for (String[] tablet in tablets) { - Awaitility.await().atMost(20, TimeUnit.SECONDS).untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); + assertCompactionStatusAtMost(backendId_to_backendIP.get(tablet[2]), backendId_to_backendHttpPort.get(tablet[2]), tablet[0], 20, TimeUnit.SECONDS) } } @@ -94,7 +88,7 @@ suite("test_agg_keys_schema_change_datev2") { sql """ alter table ${tbName} add column `datev3` datev2 DEFAULT '2022-01-01' """ int max_try_secs = 300 - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "FINISHED") { return true; @@ -111,7 +105,7 @@ suite("test_agg_keys_schema_change_datev2") { sql """sync""" qt_sql """select * from ${tbName} ORDER BY `datek1`;""" sql """ alter table ${tbName} drop column `datev3` """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "FINISHED") { return true; @@ -134,7 +128,7 @@ suite("test_agg_keys_schema_change_datev2") { sql
"""sync""" qt_sql """select * from ${tbName} ORDER BY `datek1`;""" sql """ alter table ${tbName} add column `datev3` datetimev2 DEFAULT '2022-01-01 11:11:11' """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "FINISHED") { return true; @@ -150,7 +144,7 @@ suite("test_agg_keys_schema_change_datev2") { sql """sync""" qt_sql """select * from ${tbName} ORDER BY `datek1`;""" sql """ alter table ${tbName} drop column `datev3` """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "FINISHED") { return true; @@ -174,7 +168,7 @@ suite("test_agg_keys_schema_change_datev2") { qt_sql """select * from ${tbName} ORDER BY `datek1`;""" sql """ alter table ${tbName} add column `datev3` datetimev2(3) DEFAULT '2022-01-01 11:11:11.111' """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "FINISHED") { return true; @@ -200,7 +194,7 @@ suite("test_agg_keys_schema_change_datev2") { qt_sql """select * from ${tbName} ORDER BY `datek1`;""" sql """ alter table ${tbName} drop column `datev3` """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "FINISHED") { return true; diff --git a/regression-test/suites/schema_change_p0/datev2/test_schema_change_varchar_to_datev2.groovy b/regression-test/suites/schema_change_p0/datev2/test_schema_change_varchar_to_datev2.groovy index d01d8cd54f3747..e18fdad3ffa76d 100644 --- a/regression-test/suites/schema_change_p0/datev2/test_schema_change_varchar_to_datev2.groovy +++ b/regression-test/suites/schema_change_p0/datev2/test_schema_change_varchar_to_datev2.groovy @@ -48,18 +48,12 @@ suite("test_schema_change_varchar_to_datev2") { logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) } + // wait compaction to start + Thread.sleep(10000) + // wait for all compactions done for (String[] tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); + assertCompactionStatus(backendId_to_backendIP.get(tablet[2]), backendId_to_backendHttpPort.get(tablet[2]), tablet[0]) } } @@ -85,7 +79,7 @@ suite("test_schema_change_varchar_to_datev2") { sql """ alter table ${tbName} modify column `k3` date; """ int max_try_secs = 300 -
Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "FINISHED") { return true; diff --git a/regression-test/suites/schema_change_p0/decimalv2/test_agg_keys_schema_change_decimalv2.groovy b/regression-test/suites/schema_change_p0/decimalv2/test_agg_keys_schema_change_decimalv2.groovy index 295a034d8eee90..0d42cc568123a1 100644 --- a/regression-test/suites/schema_change_p0/decimalv2/test_agg_keys_schema_change_decimalv2.groovy +++ b/regression-test/suites/schema_change_p0/decimalv2/test_agg_keys_schema_change_decimalv2.groovy @@ -62,18 +62,12 @@ suite("test_agg_keys_schema_change_decimalv2", "nonConcurrent") { logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) } + // wait compaction to start + Thread.sleep(10000) + // wait for all compactions done for (String[] tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); + assertCompactionStatus(backendId_to_backendIP.get(tablet[2]), backendId_to_backendHttpPort.get(tablet[2]), tablet[0]) } } @@ -99,7 +93,7 @@ suite("test_agg_keys_schema_change_decimalv2", "nonConcurrent") { sql """ alter table ${tbName} add column `decimalv2v3` decimalv2(27,9) """ int max_try_secs = 300 - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(500, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(500, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "FINISHED") { return true; @@ -114,7 +108,7 @@ suite("test_agg_keys_schema_change_decimalv2", "nonConcurrent") { qt_sql3 """select * from ${tbName} ORDER BY 1,2,3,4;""" sql """ alter table ${tbName} drop column `decimalv2v3` """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(500, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(500, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "FINISHED") { return true; @@ -127,7 +121,7 @@ suite("test_agg_keys_schema_change_decimalv2", "nonConcurrent") { // DECIMALV2(21,3) -> decimalv3 OK sql """ alter table ${tbName} modify column decimalv2k2 DECIMALV3(21,3) key """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(500, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(500, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "FINISHED") { return true; @@ -140,7 +134,7 @@ suite("test_agg_keys_schema_change_decimalv2", "nonConcurrent") { // DECIMALV2(21,3) -> decimalv3 OK sql """ alter table ${tbName} modify column decimalv2k3 DECIMALV3(38,10) key """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(500,
TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(500, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "FINISHED") { return true; @@ -153,7 +147,7 @@ suite("test_agg_keys_schema_change_decimalv2", "nonConcurrent") { // DECIMALV2(27,9) -> decimalv3, round scale part, not overflow sql """ alter table ${tbName} modify column decimalv2v1 DECIMALV3(26,8) sum """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(500, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(500, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "FINISHED") { return true; @@ -167,7 +161,7 @@ suite("test_agg_keys_schema_change_decimalv2", "nonConcurrent") { // DECIMALV2(21,3) -> decimalv3, round scale part, overflow sql """ alter table ${tbName} modify column decimalv2v2 DECIMALV3(20,2) sum """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(500, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(500, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "CANCELLED") { return true; @@ -181,7 +175,7 @@ suite("test_agg_keys_schema_change_decimalv2", "nonConcurrent") { // DECIMALV2(21,3) -> decimalv3, narrow integral, overflow sql """ alter table ${tbName} modify column decimalv2v2 DECIMALV3(20,3) sum """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(500, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(500, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "CANCELLED") { return true; @@ -196,7 +190,7 @@ suite("test_agg_keys_schema_change_decimalv2", "nonConcurrent") { // DECIMALV3(21,3) -> decimalv2 OK sql """ alter table ${tbName} modify column decimalv2k2 DECIMALV2(21,3) key """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(500, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(500, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "FINISHED") { return true; @@ -210,7 +204,7 @@ suite("test_agg_keys_schema_change_decimalv2", "nonConcurrent") { // DECIMALV3(26,8) -> decimalv2 sql """ alter table ${tbName} modify column decimalv2v1 DECIMALV2(25,7) sum """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(500, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(500, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "FINISHED") { return true; @@ -224,7 +218,7 @@ suite("test_agg_keys_schema_change_decimalv2", "nonConcurrent") { // DECIMALV3(26,8) -> decimalv2, narrow integer sql """ alter table ${tbName} modify column decimalv2v1 DECIMALV2(25,8) sum """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(500, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(500, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "FINISHED") { return true; diff --git 
a/regression-test/suites/schema_change_p0/decimalv3/test_agg_keys_schema_change_decimalv3.groovy b/regression-test/suites/schema_change_p0/decimalv3/test_agg_keys_schema_change_decimalv3.groovy index fd28d01ed4595f..7dce8f217a7ef2 100644 --- a/regression-test/suites/schema_change_p0/decimalv3/test_agg_keys_schema_change_decimalv3.groovy +++ b/regression-test/suites/schema_change_p0/decimalv3/test_agg_keys_schema_change_decimalv3.groovy @@ -49,18 +49,14 @@ suite("test_agg_keys_schema_change_decimalv3") { logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) } + // wait compaction to start. + Thread.sleep(10000) + // wait for all compactions done for (String[] tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); + def tid = tablet[0] + def beid = tablet[2] + assertCompactionStatus(backendId_to_backendIP.get(beid), backendId_to_backendHttpPort.get(tablet[2]), tid) } } @@ -83,7 +79,7 @@ suite("test_agg_keys_schema_change_decimalv3") { sql """ alter table ${tbName} add column `decimalv3v3` DECIMALV3(38,4) """ int max_try_secs = 300 - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "FINISHED") { return true; @@ -97,7 +93,7 @@ suite("test_agg_keys_schema_change_decimalv3") { sql """sync""" qt_sql """select * from ${tbName} ORDER BY `decimalv3k1`;""" sql """ alter table ${tbName} drop column `decimalv3v3` """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "FINISHED") { return true; @@ -108,7 +104,7 @@ suite("test_agg_keys_schema_change_decimalv3") { sql """sync""" qt_sql """select * from ${tbName} ORDER BY `decimalv3k1`;""" sql """ alter table ${tbName} modify column decimalv3k2 DECIMALV3(19,3) key """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "CANCELLED") { return true; @@ -121,7 +117,7 @@ suite("test_agg_keys_schema_change_decimalv3") { qt_sql """select * from ${tbName} ORDER BY `decimalv3k1`;""" sql """ alter table ${tbName} modify column decimalv3k2 DECIMALV3(38,10) key """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "CANCELLED") { return true; @@ -133,7 +129,7 @@ suite("test_agg_keys_schema_change_decimalv3") { qt_sql 
"""select * from ${tbName} ORDER BY `decimalv3k1`;""" sql """ alter table ${tbName} modify column decimalv3k2 DECIMALV3(16,3) key """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "CANCELLED") { return true; diff --git a/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy index 6fe76138cd28ca..c7204e90a5b71d 100644 --- a/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy @@ -101,7 +101,7 @@ suite ("test_dup_keys_schema_change") { ALTER TABLE ${tableName} DROP COLUMN sex """ int max_try_time = 300 - Awaitility.await().atMost(max_try_time, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_time, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tableName) if (result == "FINISHED") { return true; @@ -149,18 +149,14 @@ suite ("test_dup_keys_schema_change") { //assertEquals(code, 0) } + // wait compaction to start + Thread.sleep(10000) + // wait for all compactions done for (String[] tablet in tablets) { - Awaitility.await().atMost(20, TimeUnit.SECONDS).untilAsserted(() -> { - String tablet_id = tablet[0] - def backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); + def tid = tablet[0] + def beid = tablet[2] + assertCompactionStatus(backendId_to_backendIP.get(beid), backendId_to_backendHttpPort.get(tablet[2]), tid) } qt_sc """ select count(*) from ${tableName} """ diff --git a/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy index 7c3ea7703272bb..ff938f79bc1234 100644 --- a/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy @@ -27,7 +27,7 @@ suite ("test_dup_mv_schema_change") { } def waitForJob = (tbName, timeout) -> { - Awaitility.await().atMost(timeout, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(timeout, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tbName) if (result == "FINISHED") { return true; @@ -159,18 +159,12 @@ suite ("test_dup_mv_schema_change") { //assertEquals(code, 0) } + // wait compaction to start + Thread.sleep(10000) + // wait for all compactions done for (String[] tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + 
err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); + assertCompactionStatus(backendId_to_backendIP.get(tablet[2]), backendId_to_backendHttpPort.get(tablet[2]), tablet[0]) } qt_sc """ select count(*) from ${tableName} """ diff --git a/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy index 7f55b7fcd16e2c..5bf2e056386cf9 100644 --- a/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy @@ -29,7 +29,7 @@ suite ("test_dup_rollup_schema_change") { return jobStateResult[0][9] } def waitForMVJob = (tbName, timeout) -> { - Awaitility.await().atMost(timeout, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(timeout, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { String result = getMVJobState(tbName) if (result == "FINISHED") { return true; @@ -177,18 +177,12 @@ suite ("test_dup_rollup_schema_change") { //assertEquals(code, 0) } + // wait for compaction to start + Thread.sleep(10000) + // wait for all compactions done for (String[] tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); + assertCompactionStatus(backendId_to_backendIP.get(tablet[2]), backendId_to_backendHttpPort.get(tablet[2]), tablet[0]) } qt_sc """ select count(*) from ${tableName} """ diff --git a/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy index e1914bb6c8f249..4edcaaa30b0404 100644 --- a/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy @@ -137,18 +137,14 @@ suite ("test_dup_vals_schema_change") { //assertEquals(code, 0) } + // wait for compaction to start. 
+ Thread.sleep(10000) + // wait for all compactions done for (String[] tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); + def tid = tablet[0] + def beid = tablet[2] + assertCompactionStatus(backendId_to_backendIP.get(beid), backendId_to_backendHttpPort.get(beid), tid) } qt_sc """ select count(*) from ${tableName} """ diff --git a/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy b/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy index 45c37051b43ec5..d44d68a28cd71c 100644 --- a/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy @@ -131,19 +131,15 @@ suite ("test_uniq_keys_schema_change") { logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) //assertEquals(code, 0) } + + // wait for compaction to start. + Thread.sleep(10000) // wait for all compactions done for (String[] tablet in tablets) { - Awaitility.await().atMost(20, TimeUnit.SECONDS).untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); + def tid = tablet[0] + def beid = tablet[2] + assertCompactionStatus(backendId_to_backendIP.get(beid), backendId_to_backendHttpPort.get(beid), tid) } qt_sc """ select count(*) from ${tableName} """ diff --git a/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy b/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy index f2c961b5aa4c16..f63c9f377301f6 100644 --- a/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy @@ -26,7 +26,7 @@ suite ("test_uniq_mv_schema_change") { return jobStateResult[0][8] } def waitForJob = (tbName, timeout) -> { - Awaitility.await().atMost(timeout, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(timeout, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { String result = getMVJobState(tbName) if (result == "FINISHED") { return true; @@ -175,18 +175,12 @@ suite ("test_uniq_mv_schema_change") { //assertEquals(code, 0) } + // wait for compaction to start + Thread.sleep(10000) + // wait for all compactions done for (String[] tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + 
out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); + assertCompactionStatus(backendId_to_backendIP.get(tablet[2]), backendId_to_backendHttpPort.get(tablet[2]), tablet[0]) } qt_sc """ select count(*) from ${tableName} """ diff --git a/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy b/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy index 6fb74ceda4bb53..e46d4088f976bb 100644 --- a/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy @@ -30,7 +30,7 @@ suite ("test_uniq_rollup_schema_change") { return jobStateResult[0][9] } def waitForMVJob = (tbName, timeout) -> { - Awaitility.await().atMost(timeout, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(timeout, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { String result = getMVJobState(tbName) if (result == "FINISHED") { return true; @@ -132,7 +132,7 @@ suite ("test_uniq_rollup_schema_change") { """ def max_try_time = 300 - Awaitility.await().atMost(max_try_time, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_time, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tableName) if (result == "FINISHED") { return true; @@ -178,18 +178,12 @@ suite ("test_uniq_rollup_schema_change") { //assertEquals(code, 0) } + // wait for compaction to start + Thread.sleep(10000) + // wait for all compactions done for (String[] tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); + assertCompactionStatus(backendId_to_backendIP.get(tablet[2]), backendId_to_backendHttpPort.get(tablet[2]), tablet[0]) } qt_sc """ select count(*) from ${tableName} """ diff --git a/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy b/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy index 9ca8111d0ff737..eaca601f0597ed 100644 --- a/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy @@ -142,16 +142,7 @@ suite ("test_uniq_vals_schema_change") { // wait for all compactions done for (String[] tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return 
compactionStatus.run_status; - }); + assertCompactionStatus(backendId_to_backendIP.get(tablet[2]), backendId_to_backendHttpPort.get(tablet[2]), tablet[0]) } qt_sc """ select count(*) from ${tableName} """ diff --git a/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy b/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy index 2bb5e823e8bd17..c27bde816c9a39 100644 --- a/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy @@ -66,7 +66,7 @@ suite ("test_varchar_schema_change") { sql """ alter table ${tableName} modify column c2 varchar(30) """ int max_try_secs = 300 - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tableName) if (result == "FINISHED") { return true; @@ -89,7 +89,7 @@ suite ("test_varchar_schema_change") { sql """ insert into ${tableName} values(55,'2019-11-21',21474,'123aa') """ sql """ alter table ${tableName} modify column c2 INT """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tableName) if (result == "CANCELLED" || result == "FINISHED") { assertEquals(result, "CANCELLED") @@ -117,18 +117,14 @@ suite ("test_varchar_schema_change") { logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) } + // wait compaction to start. + Thread.sleep(10000) + // wait for all compactions done for (String[] tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); + def tid = tablet[0] + def beid = tablet[2] + assertCompactionStatus(backendId_to_backendIP.get(beid), backendId_to_backendHttpPort.get(tablet[2]), tid) } qt_sc " select * from ${tableName} order by 1,2; " @@ -140,7 +136,7 @@ suite ("test_varchar_schema_change") { modify column c2 varchar(40), modify column c3 varchar(6) DEFAULT '0' """ - Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { String result = getJobState(tableName) if (result == "FINISHED") { return true; diff --git a/regression-test/suites/unique_with_mow_c_p0/partial_update/test_partial_update_insert_light_schema_change.groovy b/regression-test/suites/unique_with_mow_c_p0/partial_update/test_partial_update_insert_light_schema_change.groovy index 135c18f4fc7141..1d35996c1c721a 100644 --- a/regression-test/suites/unique_with_mow_c_p0/partial_update/test_partial_update_insert_light_schema_change.groovy +++ 
b/regression-test/suites/unique_with_mow_c_p0/partial_update/test_partial_update_insert_light_schema_change.groovy @@ -65,7 +65,7 @@ suite("test_partial_update_insert_light_schema_change", "p0") { sql " ALTER table ${tableName} add column c10 INT DEFAULT '0' " def try_times=1200 // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -125,7 +125,7 @@ suite("test_partial_update_insert_light_schema_change", "p0") { // schema change sql " ALTER table ${tableName} DROP COLUMN c8 " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -205,7 +205,7 @@ suite("test_partial_update_insert_light_schema_change", "p0") { // schema change sql " ALTER table ${tableName} MODIFY COLUMN c2 double " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -243,7 +243,7 @@ suite("test_partial_update_insert_light_schema_change", "p0") { // schema change sql """ ALTER table ${tableName} ADD COLUMN c1 int key default "0"; """ // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -254,7 +254,7 @@ suite("test_partial_update_insert_light_schema_change", "p0") { sql " ALTER table ${tableName} ADD COLUMN c2 int null " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -265,7 +265,7 @@ suite("test_partial_update_insert_light_schema_change", "p0") { sql " ALTER table ${tableName} ADD COLUMN c3 int null " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, 
TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -321,7 +321,7 @@ suite("test_partial_update_insert_light_schema_change", "p0") { sql " CREATE INDEX test ON ${tableName} (c1) USING BITMAP " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; diff --git a/regression-test/suites/unique_with_mow_c_p0/partial_update/test_partial_update_insert_schema_change.groovy b/regression-test/suites/unique_with_mow_c_p0/partial_update/test_partial_update_insert_schema_change.groovy index 62140ac58bf1cf..9d6a7f1e78ded4 100644 --- a/regression-test/suites/unique_with_mow_c_p0/partial_update/test_partial_update_insert_schema_change.groovy +++ b/regression-test/suites/unique_with_mow_c_p0/partial_update/test_partial_update_insert_schema_change.groovy @@ -57,7 +57,7 @@ suite("test_partial_update_insert_schema_change", "p0") { sql " ALTER table ${tableName} add column c10 INT DEFAULT '0' " def try_times=1200 // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -117,7 +117,7 @@ suite("test_partial_update_insert_schema_change", "p0") { // schema change sql " ALTER table ${tableName} DROP COLUMN c8 " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -196,7 +196,7 @@ suite("test_partial_update_insert_schema_change", "p0") { // schema change sql " ALTER table ${tableName} MODIFY COLUMN c2 double " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -233,7 +233,7 @@ suite("test_partial_update_insert_schema_change", "p0") { // schema change sql """ ALTER table ${tableName} ADD COLUMN c1 int key default "0"; """ // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " 
SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -244,7 +244,7 @@ suite("test_partial_update_insert_schema_change", "p0") { sql " ALTER table ${tableName} ADD COLUMN c2 int null " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -255,7 +255,7 @@ suite("test_partial_update_insert_schema_change", "p0") { sql " ALTER table ${tableName} ADD COLUMN c3 int null " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -312,7 +312,7 @@ suite("test_partial_update_insert_schema_change", "p0") { sql " CREATE INDEX test ON ${tableName} (c1) USING BITMAP " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; diff --git a/regression-test/suites/unique_with_mow_c_p0/partial_update/test_partial_update_schema_change.groovy b/regression-test/suites/unique_with_mow_c_p0/partial_update/test_partial_update_schema_change.groovy index d2e27699524635..2382b643f0cdae 100644 --- a/regression-test/suites/unique_with_mow_c_p0/partial_update/test_partial_update_schema_change.groovy +++ b/regression-test/suites/unique_with_mow_c_p0/partial_update/test_partial_update_schema_change.groovy @@ -79,7 +79,7 @@ suite("test_partial_update_schema_change", "p0") { sql " ALTER table ${tableName} add column c10 INT DEFAULT '0' " def try_times=1200 // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -203,7 +203,7 @@ suite("test_partial_update_schema_change", "p0") { // schema change sql " ALTER table ${tableName} DROP COLUMN c8 " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -327,7 
+327,7 @@ suite("test_partial_update_schema_change", "p0") { // schema change sql " ALTER table ${tableName} MODIFY COLUMN c2 double " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -410,7 +410,7 @@ suite("test_partial_update_schema_change", "p0") { // schema change sql " ALTER table ${tableName} ADD COLUMN c1 int key null " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -420,7 +420,7 @@ suite("test_partial_update_schema_change", "p0") { sql " ALTER table ${tableName} ADD COLUMN c2 int null " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -512,7 +512,7 @@ suite("test_partial_update_schema_change", "p0") { sql " CREATE INDEX test ON ${tableName} (c1) USING BITMAP " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -689,7 +689,7 @@ suite("test_partial_update_schema_change", "p0") { // schema change sql " ALTER table ${tableName} add column c10 INT DEFAULT '0' " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -811,7 +811,7 @@ suite("test_partial_update_schema_change", "p0") { // schema change sql " ALTER table ${tableName} DROP COLUMN c8 " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -928,7 +928,7 @@ 
suite("test_partial_update_schema_change", "p0") { // schema change sql " ALTER table ${tableName} MODIFY COLUMN c2 double " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -1009,7 +1009,7 @@ suite("test_partial_update_schema_change", "p0") { // schema change sql " ALTER table ${tableName} ADD COLUMN c1 int key null " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -1018,7 +1018,7 @@ suite("test_partial_update_schema_change", "p0") { }); sql " ALTER table ${tableName} ADD COLUMN c2 int null " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -1105,7 +1105,7 @@ suite("test_partial_update_schema_change", "p0") { sql " CREATE INDEX test ON ${tableName} (c1) USING BITMAP " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; diff --git a/regression-test/suites/unique_with_mow_c_p0/partial_update/test_partial_update_schema_change_row_store.groovy b/regression-test/suites/unique_with_mow_c_p0/partial_update/test_partial_update_schema_change_row_store.groovy index cffb682488a2d3..5c4aae467f1ec9 100644 --- a/regression-test/suites/unique_with_mow_c_p0/partial_update/test_partial_update_schema_change_row_store.groovy +++ b/regression-test/suites/unique_with_mow_c_p0/partial_update/test_partial_update_schema_change_row_store.groovy @@ -80,7 +80,7 @@ suite("test_partial_update_row_store_schema_change", "p0") { sql " ALTER table ${tableName} add column c10 INT DEFAULT '0' " def try_times = 1200 // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -206,7 +206,7 @@ suite("test_partial_update_row_store_schema_change", "p0") { // schema change sql " 
ALTER table ${tableName} DROP COLUMN c8 " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -331,7 +331,7 @@ suite("test_partial_update_row_store_schema_change", "p0") { // schema change sql " ALTER table ${tableName} MODIFY COLUMN c2 double " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -414,7 +414,7 @@ suite("test_partial_update_row_store_schema_change", "p0") { // schema change sql " ALTER table ${tableName} ADD COLUMN c1 int key null " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -424,7 +424,7 @@ suite("test_partial_update_row_store_schema_change", "p0") { sql " ALTER table ${tableName} ADD COLUMN c2 int null " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -517,7 +517,7 @@ suite("test_partial_update_row_store_schema_change", "p0") { sql " CREATE INDEX test ON ${tableName} (c1) USING BITMAP " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -696,7 +696,7 @@ suite("test_partial_update_row_store_schema_change", "p0") { // schema change sql " ALTER table ${tableName} add column c10 INT DEFAULT '0' " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -820,7 +820,7 @@ suite("test_partial_update_row_store_schema_change", 
"p0") { // schema change sql " ALTER table ${tableName} DROP COLUMN c8 " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -939,7 +939,7 @@ suite("test_partial_update_row_store_schema_change", "p0") { // schema change sql " ALTER table ${tableName} MODIFY COLUMN c2 double " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -1022,7 +1022,7 @@ suite("test_partial_update_row_store_schema_change", "p0") { // schema change sql " ALTER table ${tableName} ADD COLUMN c1 int key null " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -1031,7 +1031,7 @@ suite("test_partial_update_row_store_schema_change", "p0") { }); sql " ALTER table ${tableName} ADD COLUMN c2 int null " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -1119,7 +1119,7 @@ suite("test_partial_update_row_store_schema_change", "p0") { sql " CREATE INDEX test ON ${tableName} (c1) USING BITMAP " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_light_schema_change.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_light_schema_change.groovy index d92dc41a68bc34..b2193b19fe36b8 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_light_schema_change.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_light_schema_change.groovy @@ -60,7 +60,7 @@ suite("test_partial_update_insert_light_schema_change", "p0") { sql " ALTER table ${tableName} add column c10 
INT DEFAULT '0' " def try_times=1200 // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -118,7 +118,7 @@ suite("test_partial_update_insert_light_schema_change", "p0") { // schema change sql " ALTER table ${tableName} DROP COLUMN c8 " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -194,7 +194,7 @@ suite("test_partial_update_insert_light_schema_change", "p0") { // schema change sql " ALTER table ${tableName} MODIFY COLUMN c2 double " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -232,7 +232,7 @@ suite("test_partial_update_insert_light_schema_change", "p0") { // schema change sql """ ALTER table ${tableName} ADD COLUMN c1 int key default "0"; """ // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -243,7 +243,7 @@ suite("test_partial_update_insert_light_schema_change", "p0") { sql " ALTER table ${tableName} ADD COLUMN c2 int null " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -254,7 +254,7 @@ suite("test_partial_update_insert_light_schema_change", "p0") { sql " ALTER table ${tableName} ADD COLUMN c3 int null " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -307,7 +307,7 @@ suite("test_partial_update_insert_light_schema_change", 
"p0") { sql " CREATE INDEX test ON ${tableName} (c1) USING BITMAP " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_schema_change.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_schema_change.groovy index 9e0abc9704c482..cc9755442b0a52 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_schema_change.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_schema_change.groovy @@ -51,7 +51,7 @@ suite("test_partial_update_insert_schema_change", "p0") { sql " ALTER table ${tableName} add column c10 INT DEFAULT '0' " def try_times=1200 // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -108,7 +108,7 @@ suite("test_partial_update_insert_schema_change", "p0") { // schema change sql " ALTER table ${tableName} DROP COLUMN c8 " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -181,7 +181,7 @@ suite("test_partial_update_insert_schema_change", "p0") { // schema change sql " ALTER table ${tableName} MODIFY COLUMN c2 double " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -217,7 +217,7 @@ suite("test_partial_update_insert_schema_change", "p0") { // schema change sql """ ALTER table ${tableName} ADD COLUMN c1 int key default "0"; """ // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -228,7 +228,7 @@ suite("test_partial_update_insert_schema_change", "p0") { sql " ALTER table ${tableName} ADD COLUMN c2 int null " // if 
timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -239,7 +239,7 @@ suite("test_partial_update_insert_schema_change", "p0") { sql " ALTER table ${tableName} ADD COLUMN c3 int null " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -292,7 +292,7 @@ suite("test_partial_update_insert_schema_change", "p0") { sql " CREATE INDEX test ON ${tableName} (c1) USING BITMAP " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_schema_change.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_schema_change.groovy index 864a97e13bfdc5..012b784d98de52 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_schema_change.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_schema_change.groovy @@ -74,7 +74,7 @@ suite("test_partial_update_schema_change", "p0") { sql " ALTER table ${tableName} add column c10 INT DEFAULT '0' " def try_times=1200 // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -194,7 +194,7 @@ suite("test_partial_update_schema_change", "p0") { // schema change sql " ALTER table ${tableName} DROP COLUMN c8 " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { + Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> { def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 " if(res[0][9].toString() == "FINISHED"){ return true; @@ -314,7 +314,7 @@ suite("test_partial_update_schema_change", "p0") { // schema change sql " ALTER table ${tableName} MODIFY COLUMN c2 double " // if timeout awaitility will raise exception - Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, 
@@ -314,7 +314,7 @@ suite("test_partial_update_schema_change", "p0") {
         // schema change
         sql " ALTER table ${tableName} MODIFY COLUMN c2 double "
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
@@ -396,7 +396,7 @@ suite("test_partial_update_schema_change", "p0") {
         // schema change
         sql " ALTER table ${tableName} ADD COLUMN c1 int key null "
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
@@ -406,7 +406,7 @@ suite("test_partial_update_schema_change", "p0") {
         sql " ALTER table ${tableName} ADD COLUMN c2 int null "
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
@@ -496,7 +496,7 @@ suite("test_partial_update_schema_change", "p0") {
         sql " CREATE INDEX test ON ${tableName} (c1) USING BITMAP "
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
@@ -667,7 +667,7 @@ suite("test_partial_update_schema_change", "p0") {
         // schema change
         sql " ALTER table ${tableName} add column c10 INT DEFAULT '0' "
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
@@ -785,7 +785,7 @@ suite("test_partial_update_schema_change", "p0") {
         // schema change
         sql " ALTER table ${tableName} DROP COLUMN c8 "
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
@@ -899,7 +899,7 @@ suite("test_partial_update_schema_change", "p0") {
         // schema change
         sql " ALTER table ${tableName} MODIFY COLUMN c2 double "
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
@@ -979,7 +979,7 @@ suite("test_partial_update_schema_change", "p0") {
         // schema change
         sql " ALTER table ${tableName} ADD COLUMN c1 int key null "
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
@@ -988,7 +988,7 @@ suite("test_partial_update_schema_change", "p0") {
         });
         sql " ALTER table ${tableName} ADD COLUMN c2 int null "
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
@@ -1073,7 +1073,7 @@ suite("test_partial_update_schema_change", "p0") {
         sql " CREATE INDEX test ON ${tableName} (c1) USING BITMAP "
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_schema_change_row_store.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_schema_change_row_store.groovy
index 2ce042f8b6391c..2ae0ef8a3a56c9 100644
--- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_schema_change_row_store.groovy
+++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_schema_change_row_store.groovy
@@ -75,7 +75,7 @@ suite("test_partial_update_row_store_schema_change", "p0") {
         sql " ALTER table ${tableName} add column c10 INT DEFAULT '0' "
         def try_times = 1200
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
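// The res[0][9] check used throughout these hunks is positional: with the plain `sql` helper,
// column index 9 of SHOW ALTER TABLE COLUMN is the job State. A minimal alternative sketch
// (an assumption, not part of this patch) reads the column by name through the existing
// sql_return_maparray helper; tableName and try_times stand for the same suite-level variables
// used in the hunks above.
Awaitility.await().atMost(try_times, TimeUnit.SECONDS).pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
    def jobs = sql_return_maparray " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
    // State is the column checked as res[0][9] in the positional form
    return jobs[0].State.toString() == "FINISHED"
})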
@@ -197,7 +197,7 @@ suite("test_partial_update_row_store_schema_change", "p0") {
         // schema change
         sql " ALTER table ${tableName} DROP COLUMN c8 "
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
@@ -318,7 +318,7 @@ suite("test_partial_update_row_store_schema_change", "p0") {
         // schema change
         sql " ALTER table ${tableName} MODIFY COLUMN c2 double "
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
@@ -400,7 +400,7 @@ suite("test_partial_update_row_store_schema_change", "p0") {
         // schema change
         sql " ALTER table ${tableName} ADD COLUMN c1 int key null "
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
@@ -410,7 +410,7 @@ suite("test_partial_update_row_store_schema_change", "p0") {
         sql " ALTER table ${tableName} ADD COLUMN c2 int null "
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
@@ -501,7 +501,7 @@ suite("test_partial_update_row_store_schema_change", "p0") {
         sql " CREATE INDEX test ON ${tableName} (c1) USING BITMAP "
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
@@ -674,7 +674,7 @@ suite("test_partial_update_row_store_schema_change", "p0") {
         // schema change
         sql " ALTER table ${tableName} add column c10 INT DEFAULT '0' "
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
@@ -794,7 +794,7 @@ suite("test_partial_update_row_store_schema_change", "p0") {
         // schema change
         sql " ALTER table ${tableName} DROP COLUMN c8 "
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
@@ -910,7 +910,7 @@ suite("test_partial_update_row_store_schema_change", "p0") {
         // schema change
         sql " ALTER table ${tableName} MODIFY COLUMN c2 double "
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
@@ -992,7 +992,7 @@ suite("test_partial_update_row_store_schema_change", "p0") {
         // schema change
         sql " ALTER table ${tableName} ADD COLUMN c1 int key null "
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
@@ -1001,7 +1001,7 @@ suite("test_partial_update_row_store_schema_change", "p0") {
         });
         sql " ALTER table ${tableName} ADD COLUMN c2 int null "
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
@@ -1087,7 +1087,7 @@ suite("test_partial_update_row_store_schema_change", "p0") {
         sql " CREATE INDEX test ON ${tableName} (c1) USING BITMAP "
         // if timeout awaitility will raise exception
-        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> {
+        Awaitility.await().atMost(try_times, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).until(() -> {
             def res = sql " SHOW ALTER TABLE COLUMN WHERE TableName = '${tableName}' ORDER BY CreateTime DESC LIMIT 1 "
             if(res[0][9].toString() == "FINISHED"){
                 return true;
diff --git a/regression-test/suites/variant_p0/compaction/test_compaction.groovy b/regression-test/suites/variant_p0/compaction/test_compaction.groovy
index 2499de5712d934..aa5a9018d55221 100644
--- a/regression-test/suites/variant_p0/compaction/test_compaction.groovy
+++ b/regression-test/suites/variant_p0/compaction/test_compaction.groovy
@@ -105,18 +105,12 @@ suite("test_compaction_variant") {
             }
         }
+        // wait compaction to start
+        Thread.sleep(10000)
+
         // wait for all compactions done
         for (def tablet in tablets) {
-            Awaitility.await().untilAsserted(() -> {
-                String tablet_id = tablet.TabletId
-                backend_id = tablet.BackendId
-                (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
-                logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
-                assertEquals(code, 0)
-                def compactionStatus = parseJson(out.trim())
-                assertEquals("success", compactionStatus.status.toLowerCase())
-                return compactionStatus.run_status;
-            });
+            assertCompactionStatus(backendId_to_backendIP.get(tablet.BackendId), backendId_to_backendHttpPort.get(tablet.BackendId), tablet.TabletId)
         }
         int rowCount = 0
diff --git a/regression-test/suites/variant_p1/compaction/compaction_sparse_column.groovy b/regression-test/suites/variant_p1/compaction/compaction_sparse_column.groovy
index 91f64c19a02d22..45135b6c5f5110 100644
--- a/regression-test/suites/variant_p1/compaction/compaction_sparse_column.groovy
+++ b/regression-test/suites/variant_p1/compaction/compaction_sparse_column.groovy
@@ -124,18 +124,12 @@ suite("test_compaction_sparse_column", "p1,nonConcurrent") {
             }
         }
+        // wait compaction to start
+        Thread.sleep(10000)
+
         // wait for all compactions done
         for (def tablet in tablets) {
-            Awaitility.await().untilAsserted(() -> {
-                String tablet_id = tablet.TabletId
-                backend_id = tablet.BackendId
-                (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
-                logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
-                assertEquals(code, 0)
-                def compactionStatus = parseJson(out.trim())
-                assertEquals("success", compactionStatus.status.toLowerCase())
-                return compactionStatus.run_status;
-            });
+            assertCompactionStatus(backendId_to_backendIP.get(tablet.BackendId), backendId_to_backendHttpPort.get(tablet.BackendId), tablet.TabletId)
         }
         int rowCount = 0
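// Consolidated usage sketch of the new wait in the two compaction suites above: trigger the
// compactions first, sleep briefly so the triggered jobs have time to start on the BEs, then
// block per tablet until assertCompactionStatus reports the tablet is no longer compacting.
// tableName and the backend maps are placeholders for the suite's own variables; this is an
// illustration, not an additional change in this patch.
def tablets = sql_return_maparray """ show tablets from ${tableName}; """

// wait compaction to start
Thread.sleep(10000)

// wait for all compactions done
for (def tablet in tablets) {
    assertCompactionStatus(backendId_to_backendIP.get(tablet.BackendId),
            backendId_to_backendHttpPort.get(tablet.BackendId), tablet.TabletId)
}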