diff --git a/regression-test/plugins/plugin_compaction.groovy b/regression-test/plugins/plugin_compaction.groovy new file mode 100644 index 00000000000000..385292eb1fe86d --- /dev/null +++ b/regression-test/plugins/plugin_compaction.groovy @@ -0,0 +1,156 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.apache.doris.regression.suite.Suite +import java.util.concurrent.TimeUnit +import org.awaitility.Awaitility; + +Suite.metaClass.be_get_compaction_status{ String ip, String port, String tablet_id /* param */-> + return curl("GET", String.format("http://%s:%s/api/compaction/run_status?tablet_id=%s", ip, port, tablet_id)) +} + +Suite.metaClass.be_get_overall_compaction_status{ String ip, String port /* param */-> + return curl("GET", String.format("http://%s:%s/api/compaction/run_status", ip, port)) +} + +Suite.metaClass.be_show_tablet_status{ String ip, String port, String tablet_id /* param */-> + return curl("GET", String.format("http://%s:%s/api/compaction/show?tablet_id=%s", ip, port, tablet_id)) +} + +Suite.metaClass._be_run_compaction = { String ip, String port, String tablet_id, String compact_type -> + return curl("POST", String.format("http://%s:%s/api/compaction/run?tablet_id=%s&compact_type=%s", + ip, port, tablet_id, compact_type)) +} + +Suite.metaClass.be_run_base_compaction = { String ip, String port, String tablet_id /* param */-> + return _be_run_compaction(ip, port, tablet_id, "base") +} + +logger.info("Added 'be_run_base_compaction' function to Suite") + +Suite.metaClass.be_run_cumulative_compaction = { String ip, String port, String tablet_id /* param */-> + return _be_run_compaction(ip, port, tablet_id, "cumulative") +} + +logger.info("Added 'be_run_cumulative_compaction' function to Suite") + +Suite.metaClass.be_run_full_compaction = { String ip, String port, String tablet_id /* param */-> + return _be_run_compaction(ip, port, tablet_id, "full") +} + +Suite.metaClass.be_run_full_compaction_by_table_id = { String ip, String port, String table_id /* param */-> + return curl("POST", String.format("http://%s:%s/api/compaction/run?table_id=%s&compact_type=full", ip, port, table_id)) +} + +logger.info("Added 'be_run_full_compaction' function to Suite") + +Suite.metaClass.trigger_and_wait_compaction = { String table_name, String 
compaction_type, int timeout_seconds=300 -> + if (!(compaction_type in ["cumulative", "base", "full"])) { + throw new IllegalArgumentException("invalid compaction type: ${compaction_type}, supported types: cumulative, base, full") + } + + def backendId_to_backendIP = [:] + def backendId_to_backendHttpPort = [:] + getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); + def tablets = sql_return_maparray """show tablets from ${table_name}""" + def exit_code, stdout, stderr + + def auto_compaction_disabled = sql("show create table ${table_name}")[0][1].contains('"disable_auto_compaction" = "true"') + def is_time_series_compaction = sql("show create table ${table_name}")[0][1].contains('"compaction_policy" = "time_series"') + + // 1. cache compaction status + def be_tablet_compaction_status = [:] + for (tablet in tablets) { + def be_host = backendId_to_backendIP["${tablet.BackendId}"] + def be_port = backendId_to_backendHttpPort["${tablet.BackendId}"] + (exit_code, stdout, stderr) = be_show_tablet_status(be_host, be_port, tablet.TabletId) + assert exit_code == 0: "get tablet status failed, exit code: ${exit_code}, stdout: ${stdout}, stderr: ${stderr}" + + def tabletStatus = parseJson(stdout.trim()) + be_tablet_compaction_status.put("${be_host}-${tablet.TabletId}", tabletStatus) + } + // 2. 
trigger compaction + def triggered_tablets = [] + for (tablet in tablets) { + def be_host = backendId_to_backendIP["${tablet.BackendId}"] + def be_port = backendId_to_backendHttpPort["${tablet.BackendId}"] + switch (compaction_type) { + case "cumulative": + (exit_code, stdout, stderr) = be_run_cumulative_compaction(be_host, be_port, tablet.TabletId) + break + case "base": + (exit_code, stdout, stderr) = be_run_base_compaction(be_host, be_port, tablet.TabletId) + break + case "full": + (exit_code, stdout, stderr) = be_run_full_compaction(be_host, be_port, tablet.TabletId) + break + } + assert exit_code == 0: "trigger compaction failed, exit code: ${exit_code}, stdout: ${stdout}, stderr: ${stderr}" + def trigger_status = parseJson(stdout.trim()) + if (trigger_status.status.toLowerCase() != "success") { + if (trigger_status.status.toLowerCase() == "already_exist") { + triggered_tablets.add(tablet) // compaction already in queue, treat it as successfully triggered + } else if (!auto_compaction_disabled) { + // ignore the error if auto compaction enabled + } else { + throw new Exception("trigger compaction failed, be host: ${be_host}, tablet id: ${tablet.TabletId}, status: ${trigger_status.status}") + } + } else { + triggered_tablets.add(tablet) + } + } + + // 3. 
wait all compaction finished + def running = triggered_tablets.size() > 0 + Awaitility.await().atMost(timeout_seconds, TimeUnit.SECONDS).pollInterval(1, TimeUnit.SECONDS).until(() -> { + for (tablet in triggered_tablets) { + def be_host = backendId_to_backendIP["${tablet.BackendId}"] + def be_port = backendId_to_backendHttpPort["${tablet.BackendId}"] + + (exit_code, stdout, stderr) = be_get_compaction_status(be_host, be_port, tablet.TabletId) + assert exit_code == 0: "get compaction status failed, exit code: ${exit_code}, stdout: ${stdout}, stderr: ${stderr}" + def compactionStatus = parseJson(stdout.trim()) + assert compactionStatus.status.toLowerCase() == "success": "compaction failed, be host: ${be_host}, tablet id: ${tablet.TabletId}, status: ${compactionStatus.status}" + // running is true means compaction is still running + running = compactionStatus.run_status + + if (!isCloudMode() && !is_time_series_compaction) { + (exit_code, stdout, stderr) = be_show_tablet_status(be_host, be_port, tablet.TabletId) + assert exit_code == 0: "get tablet status failed, exit code: ${exit_code}, stdout: ${stdout}, stderr: ${stderr}" + def tabletStatus = parseJson(stdout.trim()) + def oldStatus = be_tablet_compaction_status.get("${be_host}-${tablet.TabletId}") + // last compaction success time isn't updated, indicates compaction is not started(so we treat it as running and wait) + running = running || (oldStatus["last ${compaction_type} success time"] == tabletStatus["last ${compaction_type} success time"]) + if (running) { + logger.info("compaction is still running, be host: ${be_host}, tablet id: ${tablet.TabletId}, run status: ${compactionStatus.run_status}, old status: ${oldStatus}, new status: ${tabletStatus}") + return false + } + } else { + // 1. cloud mode doesn't show compaction success time in tablet status for the time being, + // 2. 
time series compaction sometimes doesn't update compaction success time + // so we solely check run_status for these two cases + if (running) { + logger.info("compaction is still running, be host: ${be_host}, tablet id: ${tablet.TabletId}") + return false + } + } + } + return true + }) + + assert !running: "wait compaction timeout, be host: ${be_host}" +} diff --git a/regression-test/plugins/plugin_curl_requester.groovy b/regression-test/plugins/plugin_curl_requester.groovy index 62b7433f37b6cf..cb8e04acf6a83e 100644 --- a/regression-test/plugins/plugin_curl_requester.groovy +++ b/regression-test/plugins/plugin_curl_requester.groovy @@ -27,6 +27,7 @@ import org.apache.http.conn.ConnectTimeoutException import org.apache.http.conn.HttpHostConnectException import org.codehaus.groovy.runtime.IOGroovyMethods + Suite.metaClass.http_client = { String method, String url /* param */ -> Suite suite = delegate as Suite if (method != "GET" && method != "POST") { @@ -35,7 +36,7 @@ Suite.metaClass.http_client = { String method, String url /* param */ -> if (!url || !(url =~ /^https?:\/\/.+/)) { throw new Exception("Invalid url: ${url}") } - + Integer timeout = 300 // seconds Integer maxRetries = 10 Integer retryCount = 0 @@ -71,7 +72,7 @@ Suite.metaClass.http_client = { String method, String url /* param */ -> try { code = response.getStatusLine().getStatusCode() out = EntityUtils.toString(response.getEntity()) - + if (code >= 200 && code < 300) { code = 0 // to be compatible with the old curl function err = "" @@ -97,7 +98,7 @@ Suite.metaClass.http_client = { String method, String url /* param */ -> } sleep(sleepTime) - sleepTime = Math.min(sleepTime * 2, 60000) + sleepTime = Math.min(sleepTime * 2, 60000) } logger.error("HTTP request failed after ${maxRetries} attempts") @@ -111,7 +112,7 @@ Suite.metaClass.http_client = { String method, String url /* param */ -> logger.info("Added 'http_client' function to Suite") -Suite.metaClass.curl = { String method, String url, String 
body = null /* param */-> +Suite.metaClass.curl = { String method, String url, String body = null /* param */-> Suite suite = delegate as Suite if (method != "GET" && method != "POST") { throw new Exception(String.format("invalid curl method: %s", method)) @@ -119,7 +120,7 @@ Suite.metaClass.curl = { String method, String url, String body = null /* param if (url.isBlank()) { throw new Exception("invalid curl url, blank") } - + Integer timeout = 10; // 10 seconds; Integer maxRetries = 10; // Maximum number of retries Integer retryCount = 0; // Current retry count @@ -131,7 +132,7 @@ Suite.metaClass.curl = { String method, String url, String body = null /* param } else { cmd = String.format("curl --max-time %d -X %s %s", timeout, method, url).toString() } - + logger.info("curl cmd: " + cmd) def process int code @@ -161,57 +162,14 @@ Suite.metaClass.curl = { String method, String url, String body = null /* param return [code, out, err] } - logger.info("Added 'curl' function to Suite") - Suite.metaClass.show_be_config = { String ip, String port /*param */ -> return curl("GET", String.format("http://%s:%s/api/show_config", ip, port)) } logger.info("Added 'show_be_config' function to Suite") -Suite.metaClass.be_get_compaction_status{ String ip, String port, String tablet_id /* param */-> - return curl("GET", String.format("http://%s:%s/api/compaction/run_status?tablet_id=%s", ip, port, tablet_id)) -} - -Suite.metaClass.be_get_overall_compaction_status{ String ip, String port /* param */-> - return curl("GET", String.format("http://%s:%s/api/compaction/run_status", ip, port)) -} - -Suite.metaClass.be_show_tablet_status{ String ip, String port, String tablet_id /* param */-> - return curl("GET", String.format("http://%s:%s/api/compaction/show?tablet_id=%s", ip, port, tablet_id)) -} - -logger.info("Added 'be_get_compaction_status' function to Suite") - -Suite.metaClass._be_run_compaction = { String ip, String port, String tablet_id, String compact_type -> - return 
curl("POST", String.format("http://%s:%s/api/compaction/run?tablet_id=%s&compact_type=%s", - ip, port, tablet_id, compact_type)) -} - -Suite.metaClass.be_run_base_compaction = { String ip, String port, String tablet_id /* param */-> - return _be_run_compaction(ip, port, tablet_id, "base") -} - -logger.info("Added 'be_run_base_compaction' function to Suite") - -Suite.metaClass.be_run_cumulative_compaction = { String ip, String port, String tablet_id /* param */-> - return _be_run_compaction(ip, port, tablet_id, "cumulative") -} - -logger.info("Added 'be_run_cumulative_compaction' function to Suite") - -Suite.metaClass.be_run_full_compaction = { String ip, String port, String tablet_id /* param */-> - return _be_run_compaction(ip, port, tablet_id, "full") -} - -Suite.metaClass.be_run_full_compaction_by_table_id = { String ip, String port, String table_id /* param */-> - return curl("POST", String.format("http://%s:%s/api/compaction/run?table_id=%s&compact_type=full", ip, port, table_id)) -} - -logger.info("Added 'be_run_full_compaction' function to Suite") - Suite.metaClass.update_be_config = { String ip, String port, String key, String value /*param */ -> return curl("POST", String.format("http://%s:%s/api/update_config?%s=%s", ip, port, key, value)) } @@ -233,7 +191,6 @@ Suite.metaClass.update_all_be_config = { String key, Object value -> logger.info("Added 'update_all_be_config' function to Suite") - Suite.metaClass._be_report = { String ip, int port, String reportName -> def url = "http://${ip}:${port}/api/report/${reportName}" def result = Http.GET(url, true) @@ -262,7 +219,7 @@ Suite.metaClass.be_report_task = { String ip, int port -> logger.info("Added 'be_report_task' function to Suite") // check nested index file api -Suite.metaClass.check_nested_index_file = { ip, port, tablet_id, expected_rowsets_count, expected_indices_count, format -> +Suite.metaClass.check_nested_index_file = { ip, port, tablet_id, expected_rowsets_count, expected_indices_count, format 
-> def (code, out, err) = http_client("GET", String.format("http://%s:%s/api/show_nested_index_file?tablet_id=%s", ip, port, tablet_id)) logger.info("Run show_nested_index_file_on_tablet: code=" + code + ", out=" + out + ", err=" + err) // only when the expected_indices_count is 0, the tablet may not have the index file. @@ -299,4 +256,5 @@ Suite.metaClass.check_nested_index_file = { ip, port, tablet_id, expected_rowset } } -logger.info("Added 'check_nested_index_file' function to Suite") \ No newline at end of file +logger.info("Added 'check_nested_index_file' function to Suite") + diff --git a/regression-test/suites/cloud_p0/cache/compaction/test_stale_rowset.groovy b/regression-test/suites/cloud_p0/cache/compaction/test_stale_rowset.groovy index 8975d92f2ee893..1f6f06f91fbf10 100644 --- a/regression-test/suites/cloud_p0/cache/compaction/test_stale_rowset.groovy +++ b/regression-test/suites/cloud_p0/cache/compaction/test_stale_rowset.groovy @@ -87,15 +87,13 @@ suite("test_stale_rowset") { |PROPERTIES( |"exec_mem_limit" = "8589934592", |"load_parallelism" = "3")""".stripMargin() - - def table = "nation" sql new File("""${context.file.parent}/../ddl/${table}_delete.sql""").text // create table if not exists sql new File("""${context.file.parent}/../ddl/${table}.sql""").text - def load_nation_once = { + def load_nation_once = { def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString() def loadLabel = table + "_" + uniqueID // load data from cos @@ -160,63 +158,7 @@ suite("test_stale_rowset") { String[][] tablets = sql """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] - StringBuilder sb = new StringBuilder(); - sb.append("curl -X POST http://") - sb.append(backendId_to_backendIP.get(backend_id)) - sb.append(":") - sb.append(backendId_to_backendHttpPort.get(backend_id)) - sb.append("/api/compaction/run?tablet_id=") - 
sb.append(tablet_id) - sb.append("&compact_type=cumulative") - - String command = sb.toString() - process = command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - out = process.getText() - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (String[] tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet[0] - backend_id = tablet[2] - StringBuilder sb = new StringBuilder(); - sb.append("curl -X GET http://") - sb.append(backendId_to_backendIP.get(backend_id)) - sb.append(":") - sb.append(backendId_to_backendHttpPort.get(backend_id)) - sb.append("/api/compaction/run_status?tablet_id=") - sb.append(tablet_id) - - String command = sb.toString() - logger.info(command) - process = command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - out = process.getText() - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction(tableName, "cumulative") sql """ select count(*) from ${tableName}; diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/test_multi_stale_rowset.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/test_multi_stale_rowset.groovy index 
7d03f4daf82cfd..a7a53ab6c35802 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/test_multi_stale_rowset.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/test_multi_stale_rowset.groovy @@ -77,8 +77,6 @@ suite("test_multi_stale_rowset") { |PROPERTIES( |"exec_mem_limit" = "8589934592", |"load_parallelism" = "3")""".stripMargin() - - sql "use @regression_cluster_name0" @@ -88,7 +86,7 @@ suite("test_multi_stale_rowset") { sql (new File("""${context.file.parent}/ddl/${table}.sql""").text + ttlProperties) sleep(10000) - def load_customer_once = { + def load_customer_once = { def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString() def loadLabel = table + "_" + uniqueID // load data from cos @@ -145,55 +143,7 @@ suite("test_multi_stale_rowset") { String[][] tablets = sql """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] - StringBuilder sb = new StringBuilder(); - sb.append("curl -X POST http://") - sb.append(ipList[0]) - sb.append(":") - sb.append(httpPortList[0]) - sb.append("/api/compaction/run?tablet_id=") - sb.append(tablet_id) - sb.append("&compact_type=cumulative") - - String command = sb.toString() - process = command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - out = process.getText() - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - } - - // wait for all compactions done - for (String[] tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet[0] - backend_id = tablet[2] - StringBuilder sb = new StringBuilder(); - sb.append("curl -X GET http://") - sb.append(ipList[0]) - sb.append(":") - sb.append(httpPortList[0]) - sb.append("/api/compaction/run_status?tablet_id=") - 
sb.append(tablet_id) - - String command = sb.toString() - logger.info(command) - process = command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - out = process.getText() - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction(tableName, "cumulative") sleep(90000); def backendIdToAfterCompactionCacheSize = getCurCacheSize() diff --git a/regression-test/suites/cloud_p1/schema_change/compaction1/test_schema_change_with_compaction1.groovy b/regression-test/suites/cloud_p1/schema_change/compaction1/test_schema_change_with_compaction1.groovy index 549542e6dee285..92a10c5db5186f 100644 --- a/regression-test/suites/cloud_p1/schema_change/compaction1/test_schema_change_with_compaction1.groovy +++ b/regression-test/suites/cloud_p1/schema_change/compaction1/test_schema_change_with_compaction1.groovy @@ -82,33 +82,21 @@ suite('test_schema_change_with_compaction1', 'p1,nonConcurrent') { sql "delete from date where d_datekey < 19900000" sql "select count(*) from date" // cu compaction - logger.info("run compaction:" + originTabletId) - (code, out, err) = be_run_cumulative_compaction(injectBe.Host, injectBe.HttpPort, originTabletId) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - boolean running = true - do { - Thread.sleep(100) - (code, out, err) = be_get_compaction_status(injectBe.Host, injectBe.HttpPort, originTabletId) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while 
(running) + trigger_and_wait_compaction("date", "cumulative") } try { load_delete_compaction() load_delete_compaction() load_delete_compaction() - + load_date_once("date"); sleep(1000) DebugPoint.enableDebugPoint(injectBe.Host, injectBe.HttpPort.toInteger(), NodeType.BE, injectName) sql "ALTER TABLE date MODIFY COLUMN d_holidayfl bigint(11)" - sleep(5000) + sleep(5000) array = sql_return_maparray("SHOW TABLETS FROM date") for (int i = 0; i < 5; i++) { @@ -116,50 +104,16 @@ suite('test_schema_change_with_compaction1', 'p1,nonConcurrent') { } // base compaction - logger.info("run compaction:" + originTabletId) - (code, out, err) = be_run_base_compaction(injectBe.Host, injectBe.HttpPort, originTabletId) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - - - // wait for all compactions done - boolean running = true - while (running) { - Thread.sleep(100) - (code, out, err) = be_get_compaction_status(injectBe.Host, injectBe.HttpPort, originTabletId) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } + trigger_and_wait_compaction("date", "base") def newTabletId = array[1].TabletId logger.info("run compaction:" + newTabletId) (code, out, err) = be_run_base_compaction(injectBe.Host, injectBe.HttpPort, newTabletId) logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) assertTrue(out.contains("invalid tablet state.")) - - // cu compaction - for (int i = 0; i < array.size(); i++) { - tabletId = array[i].TabletId - logger.info("run compaction:" + tabletId) - (code, out, err) = be_run_cumulative_compaction(injectBe.Host, injectBe.HttpPort, tabletId) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - } - for (int i = 0; i < array.size(); i++) { - running = true - do { - 
Thread.sleep(100) - tabletId = array[i].TabletId - (code, out, err) = be_get_compaction_status(injectBe.Host, injectBe.HttpPort, tabletId) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + // cu compaction + trigger_and_wait_compaction("date", "cumulative") } finally { if (injectBe != null) { DebugPoint.disableDebugPoint(injectBe.Host, injectBe.HttpPort.toInteger(), NodeType.BE, injectName) @@ -197,25 +151,9 @@ suite('test_schema_change_with_compaction1', 'p1,nonConcurrent') { assertTrue(out.contains("[7-7]")) assertTrue(out.contains("[8-8]")) assertTrue(out.contains("[9-13]")) - - // base compaction - logger.info("run compaction:" + newTabletId) - (code, out, err) = be_run_base_compaction(injectBe.Host, injectBe.HttpPort, newTabletId) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - - - // wait for all compactions done - boolean running = true - while (running) { - Thread.sleep(100) - (code, out, err) = be_get_compaction_status(injectBe.Host, injectBe.HttpPort, newTabletId) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } + // base compaction + trigger_and_wait_compaction("date", "base") logger.info("run show:" + newTabletId) (code, out, err) = be_show_tablet_status(injectBe.Host, injectBe.HttpPort, newTabletId) logger.info("Run show: code=" + code + ", out=" + out + ", err=" + err) @@ -230,22 +168,7 @@ suite('test_schema_change_with_compaction1', 'p1,nonConcurrent') { sql """ select count(*) from date """ - logger.info("run compaction:" + newTabletId) - (code, out, err) = 
be_run_cumulative_compaction(injectBe.Host, injectBe.HttpPort, newTabletId) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - - // wait for all compactions done - running = true - while (running) { - Thread.sleep(100) - (code, out, err) = be_get_compaction_status(injectBe.Host, injectBe.HttpPort, newTabletId) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } - + trigger_and_wait_compaction("date", "cumulative") logger.info("run show:" + newTabletId) (code, out, err) = be_show_tablet_status(injectBe.Host, injectBe.HttpPort, newTabletId) logger.info("Run show: code=" + code + ", out=" + out + ", err=" + err) diff --git a/regression-test/suites/cloud_p1/schema_change/compaction10/test_schema_change_with_compaction10.groovy b/regression-test/suites/cloud_p1/schema_change/compaction10/test_schema_change_with_compaction10.groovy index ea5e818c2ee06b..ca8b2f70ab9b3b 100644 --- a/regression-test/suites/cloud_p1/schema_change/compaction10/test_schema_change_with_compaction10.groovy +++ b/regression-test/suites/cloud_p1/schema_change/compaction10/test_schema_change_with_compaction10.groovy @@ -84,32 +84,20 @@ suite('test_schema_change_with_compaction10', 'docker') { sql "delete from date where d_datekey < 19900000" sql "select count(*) from date" // cu compaction - logger.info("run compaction:" + originTabletId) - (code, out, err) = be_run_cumulative_compaction(injectBe.Host, injectBe.HttpPort, originTabletId) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - boolean running = true - do { - Thread.sleep(100) - (code, out, err) = be_get_compaction_status(injectBe.Host, injectBe.HttpPort, originTabletId) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - 
assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) + trigger_and_wait_compaction("date", "cumulative") } try { load_delete_compaction() load_delete_compaction() load_delete_compaction() - + load_date_once("date"); sleep(1000) GetDebugPoint().enableDebugPointForAllBEs(injectName) sql "ALTER TABLE date MODIFY COLUMN d_holidayfl bigint(11)" - sleep(5000) + sleep(5000) array = sql_return_maparray("SHOW TABLETS FROM date") for (int i = 0; i < 5; i++) { @@ -121,51 +109,15 @@ suite('test_schema_change_with_compaction10', 'docker') { sleep(30000) // base compaction - logger.info("run compaction:" + originTabletId) - (code, out, err) = be_run_base_compaction(injectBe.Host, injectBe.HttpPort, originTabletId) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - - - // wait for all compactions done - boolean running = true - while (running) { - Thread.sleep(100) - (code, out, err) = be_get_compaction_status(injectBe.Host, injectBe.HttpPort, originTabletId) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } + trigger_and_wait_compaction("date", "base") def newTabletId = array[1].TabletId logger.info("run compaction:" + newTabletId) (code, out, err) = be_run_base_compaction(injectBe.Host, injectBe.HttpPort, newTabletId) logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) assertTrue(out.contains("invalid tablet state.")) - // cu compaction - for (int i = 0; i < array.size(); i++) { - tabletId = array[i].TabletId - logger.info("run compaction:" + tabletId) - (code, out, err) = be_run_cumulative_compaction(injectBe.Host, injectBe.HttpPort, tabletId) - logger.info("Run 
compaction: code=" + code + ", out=" + out + ", err=" + err) - } - - for (int i = 0; i < array.size(); i++) { - running = true - do { - Thread.sleep(100) - tabletId = array[i].TabletId - (code, out, err) = be_get_compaction_status(injectBe.Host, injectBe.HttpPort, tabletId) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - + trigger_and_wait_compaction("date", "cumulative") } finally { if (injectBe != null) { GetDebugPoint().disableDebugPointForAllBEs(injectName) @@ -203,25 +155,9 @@ suite('test_schema_change_with_compaction10', 'docker') { assertTrue(out.contains("[7-7]")) assertTrue(out.contains("[8-8]")) assertTrue(out.contains("[9-13]")) - - // base compaction - logger.info("run compaction:" + newTabletId) - (code, out, err) = be_run_base_compaction(injectBe.Host, injectBe.HttpPort, newTabletId) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - - - // wait for all compactions done - boolean running = true - while (running) { - Thread.sleep(100) - (code, out, err) = be_get_compaction_status(injectBe.Host, injectBe.HttpPort, newTabletId) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } + // base compaction + trigger_and_wait_compaction("date", "base") logger.info("run show:" + newTabletId) (code, out, err) = be_show_tablet_status(injectBe.Host, injectBe.HttpPort, newTabletId) logger.info("Run show: code=" + code + ", out=" + out + ", err=" + err) @@ -236,22 +172,7 @@ suite('test_schema_change_with_compaction10', 'docker') { sql """ select count(*) from date """ - logger.info("run 
compaction:" + newTabletId) - (code, out, err) = be_run_cumulative_compaction(injectBe.Host, injectBe.HttpPort, newTabletId) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - - // wait for all compactions done - running = true - while (running) { - Thread.sleep(100) - (code, out, err) = be_get_compaction_status(injectBe.Host, injectBe.HttpPort, newTabletId) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } - + trigger_and_wait_compaction("date", "cumulative") logger.info("run show:" + newTabletId) (code, out, err) = be_show_tablet_status(injectBe.Host, injectBe.HttpPort, newTabletId) logger.info("Run show: code=" + code + ", out=" + out + ", err=" + err) diff --git a/regression-test/suites/cloud_p1/schema_change/compaction11/test_schema_change_with_compaction11.groovy b/regression-test/suites/cloud_p1/schema_change/compaction11/test_schema_change_with_compaction11.groovy index d49d8646d3fd51..768481a3264ded 100644 --- a/regression-test/suites/cloud_p1/schema_change/compaction11/test_schema_change_with_compaction11.groovy +++ b/regression-test/suites/cloud_p1/schema_change/compaction11/test_schema_change_with_compaction11.groovy @@ -84,32 +84,20 @@ suite('test_schema_change_with_compaction11', 'docker') { sql "delete from date where d_datekey < 19900000" sql "select count(*) from date" // cu compaction - logger.info("run compaction:" + originTabletId) - (code, out, err) = be_run_cumulative_compaction(injectBe.Host, injectBe.HttpPort, originTabletId) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - boolean running = true - do { - Thread.sleep(100) - (code, out, err) = be_get_compaction_status(injectBe.Host, injectBe.HttpPort, originTabletId) - logger.info("Get compaction status: code=" + 
code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) + trigger_and_wait_compaction("date", "cumulative") } try { load_delete_compaction() load_delete_compaction() load_delete_compaction() - + load_date_once("date"); sleep(1000) GetDebugPoint().enableDebugPointForAllBEs(injectName) sql "ALTER TABLE date MODIFY COLUMN d_holidayfl bigint(11)" - sleep(5000) + sleep(5000) array = sql_return_maparray("SHOW TABLETS FROM date") for (int i = 0; i < 5; i++) { @@ -117,28 +105,13 @@ suite('test_schema_change_with_compaction11', 'docker') { } // base compaction - logger.info("run compaction:" + originTabletId) - (code, out, err) = be_run_base_compaction(injectBe.Host, injectBe.HttpPort, originTabletId) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - - - // wait for all compactions done - boolean running = true - while (running) { - Thread.sleep(100) - (code, out, err) = be_get_compaction_status(injectBe.Host, injectBe.HttpPort, originTabletId) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } def newTabletId = array[1].TabletId + trigger_and_wait_compaction("date", "base") logger.info("run compaction:" + newTabletId) (code, out, err) = be_run_base_compaction(injectBe.Host, injectBe.HttpPort, newTabletId) logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) assertTrue(out.contains("invalid tablet state.")) - + // cu compaction tabletId = array[0].TabletId @@ -203,7 +176,7 @@ suite('test_schema_change_with_compaction11', 'docker') { assertTrue(out.contains("[8-8]")) assertTrue(out.contains("[9-9]")) 
assertTrue(out.contains("[13-13]")) - + // base compaction logger.info("run compaction:" + newTabletId) (code, out, err) = be_run_base_compaction(injectBe.Host, injectBe.HttpPort, newTabletId) diff --git a/regression-test/suites/compaction/compaction_width_array_column.groovy b/regression-test/suites/compaction/compaction_width_array_column.groovy index 4e3fed354c7d84..d433e81f555f26 100644 --- a/regression-test/suites/compaction/compaction_width_array_column.groovy +++ b/regression-test/suites/compaction/compaction_width_array_column.groovy @@ -88,32 +88,7 @@ suite('compaction_width_array_column', "p2") { while (isOverLap && tryCnt < 3) { isOverLap = false - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - assertEquals("success", compactJson.status.toLowerCase()) - } - - // wait for all compactions done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - + trigger_and_wait_compaction(tableName, "cumulative") for (def tablet in tablets) { String tablet_id = tablet.TabletId (code, out, err) = curl("GET", tablet.CompactionStatus) diff --git a/regression-test/suites/compaction/test_base_compaction.groovy 
b/regression-test/suites/compaction/test_base_compaction.groovy index 83f6b44e611acd..5600017b138db0 100644 --- a/regression-test/suites/compaction/test_base_compaction.groovy +++ b/regression-test/suites/compaction/test_base_compaction.groovy @@ -28,7 +28,7 @@ suite("test_base_compaction", "p2") { backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) @@ -136,29 +136,7 @@ suite("test_base_compaction", "p2") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - assertEquals("success", compactJson.status.toLowerCase()) - } - - // wait for all compactions done - for (def tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } + trigger_and_wait_compaction(tableName, "cumulative") streamLoad { // a default db 'regression_test' is specified in @@ -196,58 +174,13 @@ 
suite("test_base_compaction", "p2") { } // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - assertEquals("success", compactJson.status.toLowerCase()) - } - - // wait for all compactions done - for (def tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } - + trigger_and_wait_compaction(tableName, "cumulative") qt_select_default """ SELECT count(*) FROM ${tableName} """ //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_base_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - assertEquals("success", 
compactJson.status.toLowerCase()) - } - - // wait for all compactions done - for (def tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } + trigger_and_wait_compaction(tableName, "base") def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) diff --git a/regression-test/suites/compaction/test_base_compaction_no_value.groovy b/regression-test/suites/compaction/test_base_compaction_no_value.groovy index 1ed30459521581..44a833bf5bb758 100644 --- a/regression-test/suites/compaction/test_base_compaction_no_value.groovy +++ b/regression-test/suites/compaction/test_base_compaction_no_value.groovy @@ -136,29 +136,7 @@ suite("test_base_compaction_no_value", "p2") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - assertEquals("success", compactJson.status.toLowerCase()) - } - - // wait for all compactions done - for (def tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } + trigger_and_wait_compaction(tableName, "cumulative") streamLoad { // a default db 'regression_test' is specified in @@ -196,59 +174,13 @@ suite("test_base_compaction_no_value", "p2") { } // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - assertEquals("success", compactJson.status.toLowerCase()) - } - - // wait for all compactions done - for (def tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } - + trigger_and_wait_compaction(tableName, "cumulative") qt_select_default """ SELECT count(*) FROM ${tableName} """ 
//TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_base_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - assertEquals("success", compactJson.status.toLowerCase()) - } - - // wait for all compactions done - for (def tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } - + trigger_and_wait_compaction(tableName, "base") def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) diff --git a/regression-test/suites/compaction/test_base_compaction_with_dup_key_max_file_size_limit.groovy b/regression-test/suites/compaction/test_base_compaction_with_dup_key_max_file_size_limit.groovy index d5b47a526eabc2..400e799a914e77 100644 --- a/regression-test/suites/compaction/test_base_compaction_with_dup_key_max_file_size_limit.groovy +++ b/regression-test/suites/compaction/test_base_compaction_with_dup_key_max_file_size_limit.groovy @@ -19,11 +19,11 @@ import 
org.codehaus.groovy.runtime.IOGroovyMethods suite("test_base_compaction_with_dup_key_max_file_size_limit", "p2") { def tableName = "test_base_compaction_with_dup_key_max_file_size_limit" - + // use customer table of tpch_sf100 def rows = 15000000 - def load_tpch_sf100_customer = { - def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString() + def load_tpch_sf100_customer = { + def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString() def rowCount = sql "select count(*) from ${tableName}" def s3BucketName = getS3BucketName() def s3WithProperties = """WITH S3 ( @@ -63,7 +63,7 @@ suite("test_base_compaction_with_dup_key_max_file_size_limit", "p2") { } sleep(5000) } - } + } } try { String backend_id; @@ -73,7 +73,7 @@ suite("test_base_compaction_with_dup_key_max_file_size_limit", "p2") { backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) @@ -107,29 +107,6 @@ suite("test_base_compaction_with_dup_key_max_file_size_limit", "p2") { } assertEquals(code, 0) return out - } - - def waitForCompaction = { be_host, be_http_port, tablet_id -> - // wait for all compactions done - boolean running = true - do { - Thread.sleep(1000) - StringBuilder sb = new StringBuilder(); - sb.append("curl -X GET http://${be_host}:${be_http_port}") - sb.append("/api/compaction/run_status?tablet_id=") - sb.append(tablet_id) - - String command = sb.toString() - logger.info(command) - process = command.execute() - code = process.waitFor() - out = process.getText() - logger.info("Get compaction status: code=" + code + ", out=" + out) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) } sql 
""" DROP TABLE IF EXISTS ${tableName}; """ @@ -149,7 +126,7 @@ suite("test_base_compaction_with_dup_key_max_file_size_limit", "p2") { PROPERTIES ( "replication_num" = "1", "disable_auto_compaction" = "true" ) - """ + """ def tablet = (sql_return_maparray """ show tablets from ${tableName}; """)[0] String tablet_id = tablet.TabletId @@ -165,10 +142,7 @@ suite("test_base_compaction_with_dup_key_max_file_size_limit", "p2") { // [0-1] 0 // [2-2] 1G nooverlapping // cp: 3 - assertTrue(triggerCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], - "cumulative", tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id) - + trigger_and_wait_compaction(tableName, "cumulative") // rowsets: // [0-1] 0 @@ -181,21 +155,15 @@ suite("test_base_compaction_with_dup_key_max_file_size_limit", "p2") { // [0-1] 0 // [2-2] 1G nooverlapping // [3-3] 1G nooverlapping - // cp: 4 - assertTrue(triggerCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], - "cumulative", tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id) - + // cp: 4 + trigger_and_wait_compaction(tableName, "cumulative") // The conditions for base compaction have been satisfied. // Since the size of first input rowset is 0, there is no file size limitation. (maybe fix it?) 
// rowsets: // [0-3] 2G nooverlapping - // cp: 4 - assertTrue(triggerCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], - "base", tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id) - + // cp: 4 + trigger_and_wait_compaction(tableName, "base") // rowsets: // [0-3] 2G nooverlapping @@ -207,10 +175,7 @@ suite("test_base_compaction_with_dup_key_max_file_size_limit", "p2") { // [0-3] 2G nooverlapping // [4-4] 1G nooverlapping // cp: 5 - assertTrue(triggerCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], - "cumulative", tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id) - + trigger_and_wait_compaction(tableName, "cumulative") // Due to the limit of config::base_compaction_dup_key_max_file_size_mbytes(1G), // can not do base compaction, return E-808 @@ -218,6 +183,7 @@ suite("test_base_compaction_with_dup_key_max_file_size_limit", "p2") { // [0-3] 2G nooverlapping // [4-4] 1G nooverlapping // cp: 5 + // TODO: replace this manual trigger with the trigger_and_wait_compaction plugin helper once it supports asserting an expected failure (E-808)
assertTrue(triggerCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], "base", tablet_id).contains("E-808")); diff --git a/regression-test/suites/compaction/test_compacation_with_delete.groovy b/regression-test/suites/compaction/test_compacation_with_delete.groovy index e41f787c52276e..43aaaa5485af18 100644 --- a/regression-test/suites/compaction/test_compacation_with_delete.groovy +++ b/regression-test/suites/compaction/test_compacation_with_delete.groovy @@ -99,37 +99,7 @@ suite("test_compaction_with_delete") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + 
trigger_and_wait_compaction(tableName, "cumulative") def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) diff --git a/regression-test/suites/compaction/test_compaction_agg_keys.groovy b/regression-test/suites/compaction/test_compaction_agg_keys.groovy index 50c79eb7a2e50d..480f4696e5fe55 100644 --- a/regression-test/suites/compaction/test_compaction_agg_keys.groovy +++ b/regression-test/suites/compaction/test_compaction_agg_keys.groovy @@ -27,7 +27,7 @@ suite("test_compaction_agg_keys") { def backendId_to_backendHttpPort = [:] getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); backend_id = backendId_to_backendIP.keySet()[0] - + def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -105,37 +105,7 @@ suite("test_compaction_agg_keys") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (def tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - - }); - } + trigger_and_wait_compaction(tableName, "cumulative") def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) @@ -145,7 +115,7 @@ suite("test_compaction_agg_keys") { (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) - + assertEquals(code, 0) def tabletJson = parseJson(out.trim()) assert tabletJson.rowsets instanceof List diff --git a/regression-test/suites/compaction/test_compaction_agg_keys_with_array_map.groovy b/regression-test/suites/compaction/test_compaction_agg_keys_with_array_map.groovy index 1556d2f00a506f..bad84c832954b4 100644 --- a/regression-test/suites/compaction/test_compaction_agg_keys_with_array_map.groovy +++ b/regression-test/suites/compaction/test_compaction_agg_keys_with_array_map.groovy @@ -26,7 +26,7 @@ suite("test_compaction_agg_keys_with_array_map") { def backendId_to_backendHttpPort = [:] getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); backend_id = backendId_to_backendIP.keySet()[0] - + def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -96,38 +96,7 @@ suite("test_compaction_agg_keys_with_array_map") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = 
tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction(tableName, "cumulative") def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) @@ -137,7 +106,7 @@ suite("test_compaction_agg_keys_with_array_map") { (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) - + assertEquals(code, 0) def tabletJson = parseJson(out.trim()) assert tabletJson.rowsets instanceof List diff --git a/regression-test/suites/compaction/test_compaction_agg_keys_with_delete.groovy b/regression-test/suites/compaction/test_compaction_agg_keys_with_delete.groovy index 1610587602d575..99ea6077e46b17 100644 --- a/regression-test/suites/compaction/test_compaction_agg_keys_with_delete.groovy +++ 
b/regression-test/suites/compaction/test_compaction_agg_keys_with_delete.groovy @@ -115,44 +115,14 @@ suite("test_compaction_agg_keys_with_delete") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction(tableName, "cumulative") def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) int rowCount = 0 for (def tablet in tablets) { String tablet_id = tablet.TabletId - + (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) diff --git 
a/regression-test/suites/compaction/test_compaction_cumu_delete.groovy b/regression-test/suites/compaction/test_compaction_cumu_delete.groovy index 31f1331969e16e..1b5a022ae50230 100644 --- a/regression-test/suites/compaction/test_compaction_cumu_delete.groovy +++ b/regression-test/suites/compaction/test_compaction_cumu_delete.groovy @@ -66,53 +66,6 @@ suite("test_compaction_cumu_delete") { return } - - def triggerCompaction = { be_host, be_http_port, compact_type -> - // trigger compactions for all tablets in ${tableName} - String tablet_id = tablet[0] - StringBuilder sb = new StringBuilder(); - sb.append("curl -X POST http://${be_host}:${be_http_port}") - sb.append("/api/compaction/run?tablet_id=") - sb.append(tablet_id) - sb.append("&compact_type=${compact_type}") - - String command = sb.toString() - logger.info(command) - process = command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - out = process.getText() - logger.info("Run compaction: code=" + code + ", out=" + out + ", disableAutoCompaction " + disableAutoCompaction + ", err=" + err) - if (!disableAutoCompaction) { - return "Success" - } - assertEquals(code, 0) - return out - } - def waitForCompaction = { be_host, be_http_port -> - // wait for all compactions done - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet[0] - StringBuilder sb = new StringBuilder(); - sb.append("curl -X GET http://${be_host}:${be_http_port}") - sb.append("/api/compaction/run_status?tablet_id=") - sb.append(tablet_id) - - String command = sb.toString() - logger.info(command) - process = command.execute() - code = process.waitFor() - out = process.getText() - logger.info("Get compaction status: code=" + code + ", out=" + out) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while 
(running) - } - // insert 11 values for 11 version sql """ INSERT INTO ${tableName} VALUES (1, "a", 100); """ sql """ INSERT INTO ${tableName} VALUES (2, "a", 100); """ @@ -127,7 +80,7 @@ suite("test_compaction_cumu_delete") { // [0-1] [2-12] // write some key in version 13, delete it in version 14, write same key in version 15 // make sure the key in version 15 will not be deleted - assertTrue(triggerCompaction(backendId_to_backendIP[backend_id], backendId_to_backendHttpPort[backend_id], "base").contains("Success")); + trigger_and_wait_compaction(tableName, "base") sql """ INSERT INTO ${tableName} VALUES (4, "a", 100); """ qt_select_default """ SELECT * FROM ${tableName}; """ sql """ DELETE FROM ${tableName} WHERE id = 4; """ @@ -147,12 +100,10 @@ suite("test_compaction_cumu_delete") { sql """ INSERT INTO ${tableName} VALUES (7, "a", 100); """ qt_select_default """ SELECT * FROM ${tableName}; """ - assertTrue(triggerCompaction(backendId_to_backendIP[backend_id], backendId_to_backendHttpPort[backend_id], "cumulative").contains("Success")); - waitForCompaction(backendId_to_backendIP[backend_id], backendId_to_backendHttpPort[backend_id]) + trigger_and_wait_compaction(tableName, "cumulative") qt_select_default """ SELECT * FROM ${tableName}; """ - assertTrue(triggerCompaction(backendId_to_backendIP[backend_id], backendId_to_backendHttpPort[backend_id], "base").contains("Success")); - waitForCompaction(backendId_to_backendIP[backend_id], backendId_to_backendHttpPort[backend_id]) + trigger_and_wait_compaction(tableName, "base") qt_select_default """ SELECT * FROM ${tableName}; """ } finally { // try_sql("DROP TABLE IF EXISTS ${tableName}") diff --git a/regression-test/suites/compaction/test_compaction_dup_keys.groovy b/regression-test/suites/compaction/test_compaction_dup_keys.groovy index 458185ba8069ac..bf435a734b51b8 100644 --- a/regression-test/suites/compaction/test_compaction_dup_keys.groovy +++ b/regression-test/suites/compaction/test_compaction_dup_keys.groovy 
@@ -28,7 +28,7 @@ suite("test_compaction_dup_keys") { backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) @@ -103,38 +103,7 @@ suite("test_compaction_dup_keys") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction(tableName, "cumulative") def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) @@ -145,7 +114,7 @@ 
suite("test_compaction_dup_keys") { (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) - + def tabletJson = parseJson(out.trim()) assert tabletJson.rowsets instanceof List for (String rowset in (List) tabletJson.rowsets) { diff --git a/regression-test/suites/compaction/test_compaction_dup_keys_with_delete.groovy b/regression-test/suites/compaction/test_compaction_dup_keys_with_delete.groovy index 2e34086172a553..02b5cfa95a61cb 100644 --- a/regression-test/suites/compaction/test_compaction_dup_keys_with_delete.groovy +++ b/regression-test/suites/compaction/test_compaction_dup_keys_with_delete.groovy @@ -28,7 +28,7 @@ suite("test_compaction_dup_keys_with_delete") { backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) @@ -104,7 +104,7 @@ suite("test_compaction_dup_keys_with_delete") { sql """ DELETE FROM ${tableName} where user_id <= 5 """ - + sql """ INSERT INTO ${tableName} VALUES (4, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Beijing', 10, 1, NULL, NULL, NULL, NULL, '2020-01-05', 1, 34, 20) """ @@ -115,50 +115,18 @@ suite("test_compaction_dup_keys_with_delete") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - - def compactJson = 
parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction(tableName, "cumulative") def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) int rowCount = 0 for (def tablet in tablets) { String tablet_id = tablet.TabletId - + (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) - + def tabletJson = parseJson(out.trim()) assert tabletJson.rowsets instanceof List for (String rowset in (List) tabletJson.rowsets) { diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys.groovy index 21fd16403ac79f..fac2cc4ac80e61 100644 --- a/regression-test/suites/compaction/test_compaction_uniq_keys.groovy +++ b/regression-test/suites/compaction/test_compaction_uniq_keys.groovy @@ -29,7 +29,7 @@ suite("test_compaction_uniq_keys") { backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) @@ -104,39 +104,11 @@ suite("test_compaction_uniq_keys") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (def tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } + trigger_and_wait_compaction(tableName, "cumulative") def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) - + int rowCount = 0 for (def tablet in tablets) { String tablet_id = tablet.TabletId diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys_ck.groovy 
b/regression-test/suites/compaction/test_compaction_uniq_keys_ck.groovy index b65557b059c800..1b401b3c789898 100644 --- a/regression-test/suites/compaction/test_compaction_uniq_keys_ck.groovy +++ b/regression-test/suites/compaction/test_compaction_uniq_keys_ck.groovy @@ -28,7 +28,7 @@ suite("test_compaction_uniq_keys_ck") { backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) @@ -62,9 +62,9 @@ suite("test_compaction_uniq_keys_ck") { `max_dwell_time` INT DEFAULT "0" COMMENT "用户最大停留时间", `min_dwell_time` INT DEFAULT "99999" COMMENT "用户最小停留时间") UNIQUE KEY(`user_id`, `date`, `datev2`, `datetimev2_1`, `datetimev2_2`, `city`, `age`, `sex`) - CLUSTER BY(`last_visit_date_not_null`, `age`, `sex`, `city`) + CLUSTER BY(`last_visit_date_not_null`, `age`, `sex`, `city`) DISTRIBUTED BY HASH(`user_id`) - PROPERTIES ( + PROPERTIES ( "replication_num" = "1", "enable_unique_key_merge_on_write" = "true" ); @@ -108,37 +108,7 @@ suite("test_compaction_uniq_keys_ck") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - 
// wait for all compactions done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction(tableName, "cumulative") def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys_row_store.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys_row_store.groovy index 82b8f2b8d5a06b..9917cbb23f7976 100644 --- a/regression-test/suites/compaction/test_compaction_uniq_keys_row_store.groovy +++ b/regression-test/suites/compaction/test_compaction_uniq_keys_row_store.groovy @@ -44,7 +44,7 @@ suite("test_compaction_uniq_keys_row_store", "p0") { backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) @@ -185,38 +185,7 @@ suite("test_compaction_uniq_keys_row_store", "p0") { checkValue() // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" 
+ err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction(tableName, "cumulative") def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys_row_store_ck.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys_row_store_ck.groovy index 5145e810c1b020..19a6d467d84865 100644 --- a/regression-test/suites/compaction/test_compaction_uniq_keys_row_store_ck.groovy +++ b/regression-test/suites/compaction/test_compaction_uniq_keys_row_store_ck.groovy @@ -44,7 +44,7 @@ suite("test_compaction_uniq_keys_row_store_ck", "p0") { backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) @@ -138,7 +138,7 @@ suite("test_compaction_uniq_keys_row_store_ck", "p0") { `cost` BIGINT DEFAULT "0" COMMENT 
"用户总消费", `max_dwell_time` INT DEFAULT "0" COMMENT "用户最大停留时间", `min_dwell_time` INT DEFAULT "99999" COMMENT "用户最小停留时间") - UNIQUE KEY(`user_id`, `date`, `datev2`, `datetimev2_1`, `datetimev2_2`, `city`, `age`, `sex`) + UNIQUE KEY(`user_id`, `date`, `datev2`, `datetimev2_1`, `datetimev2_2`, `city`, `age`, `sex`) CLUSTER BY(`last_visit_date`, `last_update_date`, `city`, `cost`) DISTRIBUTED BY HASH(`user_id`) PROPERTIES ( "replication_num" = "1", @@ -187,38 +187,7 @@ suite("test_compaction_uniq_keys_row_store_ck", "p0") { checkValue() // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction(tableName, "cumulative") def replicaNum = get_table_replica_num(tableName) logger.info("get table replica 
num: " + replicaNum) diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete.groovy index 18a46422d3292b..463597124c78c8 100644 --- a/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete.groovy +++ b/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete.groovy @@ -28,7 +28,7 @@ suite("test_compaction_uniq_keys_with_delete") { backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) @@ -119,37 +119,7 @@ suite("test_compaction_uniq_keys_with_delete") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction 
status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction(tableName, "cumulative") def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete_ck.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete_ck.groovy index 21af1a9220788c..6a61c84a1f6003 100644 --- a/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete_ck.groovy +++ b/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete_ck.groovy @@ -28,7 +28,7 @@ suite("test_compaction_uniq_keys_with_delete_ck") { backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) @@ -62,7 +62,7 @@ suite("test_compaction_uniq_keys_with_delete_ck") { `max_dwell_time` INT DEFAULT "0" COMMENT "用户最大停留时间", `min_dwell_time` INT DEFAULT "99999" COMMENT "用户最小停留时间") UNIQUE KEY(`user_id`, `date`, `datev2`, `datetimev2_1`, `datetimev2_2`, `city`, `age`, `sex`) - CLUSTER BY(`sex`, `date`, `cost`) + CLUSTER BY(`sex`, `date`, `cost`) DISTRIBUTED BY HASH(`user_id`) PROPERTIES ( "replication_num" = "1", @@ -136,37 +136,7 @@ suite("test_compaction_uniq_keys_with_delete_ck") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction(tableName, "cumulative") def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) diff --git a/regression-test/suites/compaction/test_cu_compaction_remove_old_version_delete_bitmap.groovy b/regression-test/suites/compaction/test_cu_compaction_remove_old_version_delete_bitmap.groovy index e14bdbbb2c6505..bfd07d6546b18d 100644 --- a/regression-test/suites/compaction/test_cu_compaction_remove_old_version_delete_bitmap.groovy +++ b/regression-test/suites/compaction/test_cu_compaction_remove_old_version_delete_bitmap.groovy @@ -380,4 +380,4 @@ suite("test_cu_compaction_remove_old_version_delete_bitmap", "nonConcurrent") { GetDebugPoint().disableDebugPointForAllBEs("CloudCumulativeCompaction.modify_rowsets.update_delete_bitmap_failed") } -} \ No newline at end of file +} 
diff --git a/regression-test/suites/compaction/test_full_compaction.groovy b/regression-test/suites/compaction/test_full_compaction.groovy index b54f92747e6c4e..1849a45e8aebe7 100644 --- a/regression-test/suites/compaction/test_full_compaction.groovy +++ b/regression-test/suites/compaction/test_full_compaction.groovy @@ -46,9 +46,9 @@ suite("test_full_compaction") { sql """ CREATE TABLE ${tableName} ( `user_id` INT NOT NULL, `value` INT NOT NULL) - UNIQUE KEY(`user_id`) - DISTRIBUTED BY HASH(`user_id`) - BUCKETS 1 + UNIQUE KEY(`user_id`) + DISTRIBUTED BY HASH(`user_id`) + BUCKETS 1 PROPERTIES ("replication_allocation" = "tag.location.default: 1", "disable_auto_compaction" = "true", "enable_mow_light_delete" = "false", @@ -117,46 +117,9 @@ suite("test_full_compaction") { assert (rowsetCount == 7 * replicaNum) // trigger full compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - def times = 1 - - do{ - (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - ++times - sleep(2000) - } while (parseJson(out.trim()).status.toLowerCase()!="success" && times<=10) - - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for full compaction done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: 
code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - + trigger_and_wait_compaction(tableName, "full") // after full compaction, there is only 1 rowset. - + rowsetCount = 0 for (def tablet in tablets) { String tablet_id = tablet.TabletId diff --git a/regression-test/suites/compaction/test_full_compaction_by_table_id.groovy b/regression-test/suites/compaction/test_full_compaction_by_table_id.groovy index 222c7e7138865b..ffea4f81219d02 100644 --- a/regression-test/suites/compaction/test_full_compaction_by_table_id.groovy +++ b/regression-test/suites/compaction/test_full_compaction_by_table_id.groovy @@ -53,9 +53,9 @@ suite("test_full_compaction_by_table_id") { sql """ CREATE TABLE ${tableName} ( `user_id` INT NOT NULL, `value` INT NOT NULL) - UNIQUE KEY(`user_id`) - DISTRIBUTED BY HASH(`user_id`) - BUCKETS 8 + UNIQUE KEY(`user_id`) + DISTRIBUTED BY HASH(`user_id`) + BUCKETS 8 PROPERTIES ("replication_allocation" = "tag.location.default: 1", "disable_auto_compaction" = "true", "enable_mow_light_delete" = "false", @@ -121,52 +121,9 @@ suite("test_full_compaction_by_table_id") { } // trigger full compactions for all tablets by table id in ${tableName} - // TODO: get table id - for (def tablet : tablets) { - String tablet_id = tablet.TabletId - def tablet_info = sql_return_maparray """ show tablet ${tablet_id}; """ - logger.info("tablet"+tablet_info) - def table_id = tablet_info[0].TableId - backend_id = tablet.BackendId - def times = 1 - def code, out, err - do{ - (code, out, err) = be_run_full_compaction_by_table_id(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), table_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - ++times - sleep(2000) - } while (parseJson(out.trim()).status.toLowerCase()!="success" 
&& times<=10) - - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for full compaction done - { - for (def tablet : tablets) { - boolean running = true - do { - Thread.sleep(1000) - def tablet_id = tablet.TabletId - backend_id = tablet.BackendId - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - } + trigger_and_wait_compaction(tableName, "full") // after full compaction, there is only 1 rowset. 
- for (def tablet : tablets) { int rowsetCount = 0 def (code, out, err) = curl("GET", tablet.CompactionStatus) diff --git a/regression-test/suites/compaction/test_full_compaction_ck.groovy b/regression-test/suites/compaction/test_full_compaction_ck.groovy index ae6b467acafcb9..fd4d799f8ee882 100644 --- a/regression-test/suites/compaction/test_full_compaction_ck.groovy +++ b/regression-test/suites/compaction/test_full_compaction_ck.groovy @@ -46,10 +46,10 @@ suite("test_full_compaction_ck") { sql """ CREATE TABLE ${tableName} ( `user_id` INT NOT NULL, `value` INT NOT NULL) - UNIQUE KEY(`user_id`) + UNIQUE KEY(`user_id`) CLUSTER BY(`value`) - DISTRIBUTED BY HASH(`user_id`) - BUCKETS 1 + DISTRIBUTED BY HASH(`user_id`) + BUCKETS 1 PROPERTIES ("replication_allocation" = "tag.location.default: 1", "disable_auto_compaction" = "true", "enable_mow_light_delete" = "false", @@ -118,46 +118,9 @@ suite("test_full_compaction_ck") { assert (rowsetCount == 7 * replicaNum) // trigger full compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - def times = 1 - - do{ - (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - ++times - sleep(2000) - } while (parseJson(out.trim()).status.toLowerCase()!="success" && times<=10) - - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for full compaction done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - + trigger_and_wait_compaction(tableName, "full") // after full compaction, there is only 1 rowset. - + rowsetCount = 0 for (def tablet in tablets) { String tablet_id = tablet.TabletId diff --git a/regression-test/suites/compaction/test_single_replica_compaction.groovy b/regression-test/suites/compaction/test_single_replica_compaction.groovy index d8ff209b93cd43..05286a6fa197c6 100644 --- a/regression-test/suites/compaction/test_single_replica_compaction.groovy +++ b/regression-test/suites/compaction/test_single_replica_compaction.groovy @@ -22,7 +22,7 @@ suite("test_single_compaction_p2", "p2") { return; } def tableName = "test_single_replica_compaction" - + def calc_file_crc_on_tablet = { ip, port, tablet -> return curl("GET", String.format("http://%s:%s/api/calc_crc?tablet_id=%s", ip, port, tablet)) } @@ -146,7 +146,7 @@ suite("test_single_compaction_p2", "p2") { // wait for update replica infos Thread.sleep(70000) - + // find the master be for single replica compaction Boolean found = false String master_backend_id; @@ -210,12 +210,12 @@ suite("test_single_compaction_p2", "p2") { // trigger master be to do compaction assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], - "full", tablet_id).contains("Success")); + "full", tablet_id).contains("Success")); waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) // trigger follower be to fetch compaction result for (String id in follower_backend_id) { - 
assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); + assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) } @@ -231,12 +231,12 @@ suite("test_single_compaction_p2", "p2") { // trigger master be to do compaction with delete assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], - "full", tablet_id).contains("Success")); + "full", tablet_id).contains("Success")); waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) // trigger follower be to fetch compaction result for (String id in follower_backend_id) { - assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); + assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) } diff --git a/regression-test/suites/compaction/test_time_series_compaction_policy.groovy b/regression-test/suites/compaction/test_time_series_compaction_policy.groovy index 2e8018f94a6a09..d211ba98bdd7e2 100644 --- a/regression-test/suites/compaction/test_time_series_compaction_policy.groovy +++ b/regression-test/suites/compaction/test_time_series_compaction_policy.groovy @@ -22,45 +22,6 @@ suite("test_time_series_compaction_polciy", "p0") { def backendId_to_backendIP = [:] def backendId_to_backendHttpPort = [:] getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - - def trigger_cumulative_compaction_on_tablets = { tablets -> - for (def tablet : tablets) { - String tablet_id = tablet.TabletId - String backend_id = 
tablet.BackendId - int times = 1 - - String compactionStatus; - do{ - def (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - ++times - sleep(1000) - compactionStatus = parseJson(out.trim()).status.toLowerCase(); - } while (compactionStatus!="success" && times<=3) - if (compactionStatus!="success") { - assertTrue(compactionStatus.contains("2000")) - continue; - } - assertEquals("success", compactionStatus) - } - } - - def wait_cumulative_compaction_done = { tablets -> - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - String backend_id = tablet.BackendId - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - } def get_rowset_count = { tablets -> int rowsetCount = 0 @@ -109,7 +70,7 @@ suite("test_time_series_compaction_polciy", "p0") { sql """ INSERT INTO ${tableName} VALUES (1, "bason", "bason hate pear", 99); """ sql """ INSERT INTO ${tableName} VALUES (1, "andy", "andy love apple", 100); """ sql """ INSERT INTO ${tableName} VALUES (100, "andy", "andy love apple", 100); """ - + qt_sql_1 """ select count() from ${tableName} """ //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,PathHash,MetaUrl,CompactionStatus @@ -123,17 +84,14 @@ suite("test_time_series_compaction_polciy", "p0") { assert(false) } } - + // 
BUCKETS = 2 // before cumulative compaction, there are 17 * 2 = 34 rowsets. int rowsetCount = get_rowset_count.call(tablets); assert (rowsetCount == 34 * replicaNum) // trigger cumulative compactions for all tablets in table - trigger_cumulative_compaction_on_tablets.call(tablets) - - // wait for cumulative compaction done - wait_cumulative_compaction_done.call(tablets) + trigger_and_wait_compaction(tableName, "cumulative") // after cumulative compaction, there is only 26 rowset. // 5 consecutive empty versions are merged into one empty version @@ -142,10 +100,7 @@ suite("test_time_series_compaction_polciy", "p0") { assert (rowsetCount == 26 * replicaNum) // trigger cumulative compactions for all tablets in ${tableName} - trigger_cumulative_compaction_on_tablets.call(tablets) - - // wait for cumulative compaction done - wait_cumulative_compaction_done.call(tablets) + trigger_and_wait_compaction(tableName, "cumulative") // after cumulative compaction, there is only 22 rowset. // 26 - 4 = 22 @@ -159,10 +114,7 @@ suite("test_time_series_compaction_polciy", "p0") { sql """ alter table ${tableName} set ("time_series_compaction_file_count_threshold"="10")""" sql """sync""" // trigger cumulative compactions for all tablets in ${tableName} - trigger_cumulative_compaction_on_tablets.call(tablets) - - // wait for cumulative compaction done - wait_cumulative_compaction_done.call(tablets) + trigger_and_wait_compaction(tableName, "cumulative") // after cumulative compaction, there is only 11 rowset. 
rowsetCount = get_rowset_count.call(tablets); diff --git a/regression-test/suites/compaction/test_vertical_compaction_agg_keys.groovy b/regression-test/suites/compaction/test_vertical_compaction_agg_keys.groovy index 8a661a00c13df1..89493e153436fe 100644 --- a/regression-test/suites/compaction/test_vertical_compaction_agg_keys.groovy +++ b/regression-test/suites/compaction/test_vertical_compaction_agg_keys.groovy @@ -116,37 +116,7 @@ suite("test_vertical_compaction_agg_keys") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction(tableName, "cumulative") def replicaNum = get_table_replica_num(tableName) 
logger.info("get table replica num: " + replicaNum) diff --git a/regression-test/suites/compaction/test_vertical_compaction_agg_state.groovy b/regression-test/suites/compaction/test_vertical_compaction_agg_state.groovy index 40a86812fd265b..22a8f653b74bc8 100644 --- a/regression-test/suites/compaction/test_vertical_compaction_agg_state.groovy +++ b/regression-test/suites/compaction/test_vertical_compaction_agg_state.groovy @@ -77,37 +77,7 @@ suite("test_vertical_compaction_agg_state") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction(tableName, "cumulative") def replicaNum = get_table_replica_num(tableName) 
logger.info("get table replica num: " + replicaNum) diff --git a/regression-test/suites/compaction/test_vertical_compaction_dup_keys.groovy b/regression-test/suites/compaction/test_vertical_compaction_dup_keys.groovy index ef6a0fe16ec151..7cdf6b67d406e3 100644 --- a/regression-test/suites/compaction/test_vertical_compaction_dup_keys.groovy +++ b/regression-test/suites/compaction/test_vertical_compaction_dup_keys.groovy @@ -28,7 +28,7 @@ suite("test_vertical_compaction_dup_keys") { backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) @@ -76,7 +76,7 @@ suite("test_vertical_compaction_dup_keys") { sql """ DELETE from ${tableName} where user_id <= 0 """ - + qt_select_default """ SELECT * FROM ${tableName} t ORDER BY user_id,date,city,age,sex,last_visit_date,last_update_date,last_visit_date_not_null,cost,max_dwell_time,min_dwell_time; """ @@ -116,37 +116,7 @@ suite("test_vertical_compaction_dup_keys") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (def 
tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction(tableName, "cumulative") def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) diff --git a/regression-test/suites/compaction/test_vertical_compaction_uniq_keys.groovy b/regression-test/suites/compaction/test_vertical_compaction_uniq_keys.groovy index e89dcf8851f814..6bff003a028be2 100644 --- a/regression-test/suites/compaction/test_vertical_compaction_uniq_keys.groovy +++ b/regression-test/suites/compaction/test_vertical_compaction_uniq_keys.groovy @@ -113,37 +113,7 @@ suite("test_vertical_compaction_uniq_keys") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (def 
tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction(tableName, "cumulative") def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) diff --git a/regression-test/suites/compaction/test_vertical_compaction_uniq_keys_ck.groovy b/regression-test/suites/compaction/test_vertical_compaction_uniq_keys_ck.groovy index 66f9274d9d499e..e7fad814e15af9 100644 --- a/regression-test/suites/compaction/test_vertical_compaction_uniq_keys_ck.groovy +++ b/regression-test/suites/compaction/test_vertical_compaction_uniq_keys_ck.groovy @@ -61,7 +61,7 @@ suite("test_vertical_compaction_uniq_keys_ck") { `max_dwell_time` INT DEFAULT "0" COMMENT "用户最大停留时间", `min_dwell_time` INT DEFAULT "99999" COMMENT "用户最小停留时间") UNIQUE KEY(`user_id`, `date`, `datev2`, `datetimev2_1`, `datetimev2_2`, `city`, `age`, `sex`) - CLUSTER BY(`age`, `sex`, `user_id`) + CLUSTER BY(`age`, `sex`, `user_id`) DISTRIBUTED BY HASH(`user_id`) PROPERTIES ( "replication_num" = "1", "enable_unique_key_merge_on_write"="true" ); """ @@ -115,37 +115,7 @@ suite("test_vertical_compaction_uniq_keys_ck") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction(tableName, "cumulative") def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) diff --git a/regression-test/suites/delete_p0/test_delete_sign_with_cumu_compaction.groovy b/regression-test/suites/delete_p0/test_delete_sign_with_cumu_compaction.groovy index eca0ed41128cc8..ef2b1f748b96df 100644 --- a/regression-test/suites/delete_p0/test_delete_sign_with_cumu_compaction.groovy +++ b/regression-test/suites/delete_p0/test_delete_sign_with_cumu_compaction.groovy @@ -20,7 +20,7 @@ import static java.util.concurrent.TimeUnit.SECONDS; suite('test_delete_sign_with_cumu_compaction') { def table = 'test_delete_sign_with_cumu_compaction' - + sql """ DROP TABLE IF EXISTS ${table};""" sql """ CREATE TABLE ${table} @@ -66,51 +66,19 @@ suite('test_delete_sign_with_cumu_compaction') { return } - def waitForCompaction = { be_host, be_http_port -> - // 
wait for all compactions done - Awaitility.await().atMost(30, SECONDS).pollInterval(1, SECONDS).until { - String tablet_id = tablet[0] - StringBuilder sb = new StringBuilder(); - sb.append("curl -X GET http://${be_host}:${be_http_port}") - sb.append("/api/compaction/run_status?tablet_id=") - sb.append(tablet_id) - - String command = sb.toString() - logger.info(command) - process = command.execute() - code = process.waitFor() - out = process.getText() - logger.info("Get compaction status: code=" + code + ", out=" + out) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - - !compactionStatus.run_status - } - } - (1..10).each { i -> sql """INSERT into ${table} (col1,col2,col3) values (${i}, 2, 3)""" } - be_run_cumulative_compaction(backendId_to_backendIP[backend_id], backendId_to_backendHttpPort[backend_id], tablet[0]); - waitForCompaction(backendId_to_backendIP[backend_id], backendId_to_backendHttpPort[backend_id]) + trigger_and_wait_compaction(table, "cumulative") (11..12).each { i -> sql """INSERT into ${table} (col1,col2,col3) values (${i}, 2, 3)""" } - be_run_cumulative_compaction(backendId_to_backendIP[backend_id], backendId_to_backendHttpPort[backend_id], tablet[0]); - waitForCompaction(backendId_to_backendIP[backend_id], backendId_to_backendHttpPort[backend_id]) - - be_run_base_compaction(backendId_to_backendIP[backend_id], backendId_to_backendHttpPort[backend_id], tablet[0]); - waitForCompaction(backendId_to_backendIP[backend_id], backendId_to_backendHttpPort[backend_id]) - + trigger_and_wait_compaction(table, "cumulative") + trigger_and_wait_compaction(table, "base") (1..10).each { i -> sql """ INSERT into ${table} (col1,col2,col3,__DORIS_DELETE_SIGN__) values (${i}, 2, 3, 1) """ } - - be_run_cumulative_compaction(backendId_to_backendIP[backend_id], backendId_to_backendHttpPort[backend_id], tablet[0]); - waitForCompaction(backendId_to_backendIP[backend_id], 
backendId_to_backendHttpPort[backend_id]) - + trigger_and_wait_compaction(table, "cumulative") qt_select_default """ SELECT * FROM ${table} ORDER BY col1 """ - } diff --git a/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_stale_resp_load_compaction_conflict.groovy b/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_stale_resp_load_compaction_conflict.groovy index 8f4fa45700b81f..7b867088d1b61e 100644 --- a/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_stale_resp_load_compaction_conflict.groovy +++ b/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_stale_resp_load_compaction_conflict.groovy @@ -84,24 +84,7 @@ suite("test_cloud_mow_stale_resp_load_compaction_conflict", "nonConcurrent") { Thread.sleep(11 * 1000) // trigger full compaction on tablet - logger.info("trigger compaction on another BE ${tabletBackend.Host} with backendId=${tabletBackend.BackendId}") - def (code, out, err) = be_run_full_compaction(tabletBackend.Host, tabletBackend.HttpPort, tabletId) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - Assert.assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - Assert.assertEquals("success", compactJson.status.toLowerCase()) - - // wait for full compaction to complete - Awaitility.await().atMost(3, TimeUnit.SECONDS).pollDelay(200, TimeUnit.MILLISECONDS).pollInterval(100, TimeUnit.MILLISECONDS).until( - { - (code, out, err) = be_get_compaction_status(tabletBackend.Host, tabletBackend.HttpPort, tabletId) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - Assert.assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - Assert.assertEquals("success", compactionStatus.status.toLowerCase()) - return !compactionStatus.run_status - } - ) + trigger_and_wait_compaction(table1, "full") order_qt_sql "select * from ${table1};" @@ -116,7 +99,7 @@ suite("test_cloud_mow_stale_resp_load_compaction_conflict", "nonConcurrent") 
{ Thread.sleep(1000) order_qt_sql "select * from ${table1};" - + } catch(Exception e) { logger.info(e.getMessage()) throw e diff --git a/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_skip_compaction.groovy b/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_skip_compaction.groovy index d816c30f7e9bd8..031a2ec74f7231 100644 --- a/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_skip_compaction.groovy +++ b/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_skip_compaction.groovy @@ -54,7 +54,7 @@ suite("test_partial_update_skip_compaction", "nonConcurrent") { } logger.info("tablet ${tabletId} on backend ${tabletBackend.Host} with backendId=${tabletBackend.BackendId}"); - def check_rs_metas = { expected_rs_meta_size, check_func -> + def check_rs_metas = { expected_rs_meta_size, check_func -> if (isCloudMode()) { return } @@ -180,7 +180,7 @@ suite("test_partial_update_skip_compaction", "nonConcurrent") { Assert.assertEquals(numRows, 3) } }) - + } catch(Exception e) { logger.info(e.getMessage()) throw e diff --git a/regression-test/suites/fault_injection_p0/test_fix_tablet_stat_fault_injection.groovy b/regression-test/suites/fault_injection_p0/test_fix_tablet_stat_fault_injection.groovy index d96f6f0ec48cd3..5703bbd29c4af5 100644 --- a/regression-test/suites/fault_injection_p0/test_fix_tablet_stat_fault_injection.groovy +++ b/regression-test/suites/fault_injection_p0/test_fix_tablet_stat_fault_injection.groovy @@ -78,38 +78,10 @@ suite("test_fix_tablet_stat_fault_injection", "nonConcurrent") { assert tabletJson.rowsets instanceof List rowsetCount +=((List) tabletJson.rowsets).size() } - assert (rowsetCount == 6 * bucketSize * partitionSize) + assert (rowsetCount == 6 * bucketSize * partitionSize) // trigger full compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - 
times = 1 - - do{ - (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - ++times - } while (parseJson(out.trim()).status.toLowerCase()!="success" && times<=10) - - def compactJson = parseJson(out.trim()) - assertEquals("success", compactJson.status.toLowerCase()) - } - - // wait for full compaction done - for (def tablet in tablets) { - boolean running = true - do { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction(tableName, "full") sleep(60000) // after full compaction, there are 2 rowsets. 
diff --git a/regression-test/suites/fault_injection_p0/test_full_compaciton_run_status.groovy b/regression-test/suites/fault_injection_p0/test_full_compaciton_run_status.groovy index 700409f39a9bb0..29f80528a80b41 100644 --- a/regression-test/suites/fault_injection_p0/test_full_compaciton_run_status.groovy +++ b/regression-test/suites/fault_injection_p0/test_full_compaciton_run_status.groovy @@ -20,7 +20,7 @@ suite("test_full_compaction_run_status","nonConcurrent") { return } def tableName = "full_compaction_run_status_test" - + // test successful group commit async load sql """ DROP TABLE IF EXISTS ${tableName} """ @@ -37,8 +37,8 @@ suite("test_full_compaction_run_status","nonConcurrent") { `k` int , `v` int , ) engine=olap - DISTRIBUTED BY HASH(`k`) - BUCKETS 2 + DISTRIBUTED BY HASH(`k`) + BUCKETS 2 properties( "replication_num" = "1", "disable_auto_compaction" = "true") diff --git a/regression-test/suites/fault_injection_p0/test_full_compaction_with_ordered_data.groovy b/regression-test/suites/fault_injection_p0/test_full_compaction_with_ordered_data.groovy index c6dfa6b885cf6c..c060888690c077 100644 --- a/regression-test/suites/fault_injection_p0/test_full_compaction_with_ordered_data.groovy +++ b/regression-test/suites/fault_injection_p0/test_full_compaction_with_ordered_data.groovy @@ -22,7 +22,7 @@ suite("test_full_compaction_with_ordered_data","nonConcurrent") { return } def tableName = "test_full_compaction_with_ordered_data" - + sql """ DROP TABLE IF EXISTS ${tableName} """ String backend_id; @@ -39,8 +39,8 @@ suite("test_full_compaction_with_ordered_data","nonConcurrent") { `v` int , ) engine=olap DUPLICATE KEY(k) - DISTRIBUTED BY HASH(k) - BUCKETS 3 + DISTRIBUTED BY HASH(k) + BUCKETS 3 properties( "replication_num" = "1", "disable_auto_compaction" = "true") @@ -70,38 +70,9 @@ suite("test_full_compaction_with_ordered_data","nonConcurrent") { assert (rowsetCount == 5 * replicaNum * 3) // trigger full compactions for all tablets in ${tableName} - for (def 
tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - times = 1 - - do{ - (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - ++times - sleep(2000) - } while (parseJson(out.trim()).status.toLowerCase()!="success" && times<=10) - - } - - // wait for full compaction done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction(tableName, "full") // after full compaction, there is only 1 rowset. 
- rowsetCount = 0 for (def tablet in tablets) { String tablet_id = tablet.TabletId @@ -155,38 +126,9 @@ suite("test_full_compaction_with_ordered_data","nonConcurrent") { assert (rowsetCount == 12 * replicaNum * 3) // trigger full compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - times = 1 - - do{ - (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - ++times - sleep(2000) - } while (parseJson(out.trim()).status.toLowerCase()!="success" && times<=10) - - } - - // wait for full compaction done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - + trigger_and_wait_compaction(tableName, "full") // after full compaction, there is only 1 rowset. 
- + rowsetCount = 0 for (def tablet in tablets) { String tablet_id = tablet.TabletId diff --git a/regression-test/suites/fault_injection_p0/test_index_compaction_exception_fault_injection.groovy b/regression-test/suites/fault_injection_p0/test_index_compaction_exception_fault_injection.groovy index 9c0cb5aea97f0b..8a9c6d71609a2e 100644 --- a/regression-test/suites/fault_injection_p0/test_index_compaction_exception_fault_injection.groovy +++ b/regression-test/suites/fault_injection_p0/test_index_compaction_exception_fault_injection.groovy @@ -28,7 +28,7 @@ suite("test_index_compaction_exception_fault_injection", "nonConcurrent") { logger.info("changed variables: " + changed_variables.toString()) boolean disableAutoCompaction = false - + def set_be_config = { key, value -> for (String backend_id: backendId_to_backendIP.keySet()) { def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value) @@ -108,7 +108,7 @@ suite("test_index_compaction_exception_fault_injection", "nonConcurrent") { } } - def insert_data = { -> + def insert_data = { -> sql """ INSERT INTO ${tableName} VALUES (1, "andy", 10, [89, 80, 98], ["football", "basketball"], "andy is good at sports", ["andy has a good heart", "andy is so nice"]); """ sql """ INSERT INTO ${tableName} VALUES (1, "bason", 11, [79, 85, 97], ["singing", "dancing"], "bason is good at singing", ["bason is very clever", "bason is very healthy"]); """ sql """ INSERT INTO ${tableName} VALUES (2, "andy", 10, [89, 80, 98], ["football", "basketball"], "andy is good at sports", ["andy has a good heart", "andy is so nice"]); """ @@ -117,7 +117,7 @@ suite("test_index_compaction_exception_fault_injection", "nonConcurrent") { sql """ INSERT INTO ${tableName} VALUES (3, "bason", 11, [79, 85, 97], ["singing", "dancing"], "bason is good at singing", ["bason is very clever", "bason is very healthy"]); """ } - def run_sql = { -> + def run_sql = { -> def result = 
sql_return_maparray "SELECT /*+ SET_VAR(enable_match_without_inverted_index = false, enable_common_expr_pushdown = true) */ * FROM ${tableName} WHERE name MATCH 'bason'" assertEquals(3, result.size()) assertEquals(1, result[0]['id']) @@ -255,7 +255,7 @@ suite("test_index_compaction_exception_fault_injection", "nonConcurrent") { } run_sql.call() - + if (debug_point == "compact_column_delete_tmp_path_error") { set_be_config.call("inverted_index_ram_dir_enable", "true") } @@ -301,7 +301,7 @@ suite("test_index_compaction_exception_fault_injection", "nonConcurrent") { String backend_id; backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) @@ -331,7 +331,7 @@ suite("test_index_compaction_exception_fault_injection", "nonConcurrent") { tableName = "test_index_compaction_exception_fault_injection_unique" create_and_test_table.call(tableName, "UNIQUE", debug_points_abnormal_compaction, true) create_and_test_table.call(tableName, "UNIQUE", debug_points_normal_compaction, false) - + } finally { if (has_update_be_config) { set_be_config.call("inverted_index_compaction_enable", invertedIndexCompactionEnable.toString()) diff --git a/regression-test/suites/fault_injection_p0/test_index_writer_file_cache.groovy b/regression-test/suites/fault_injection_p0/test_index_writer_file_cache.groovy index b26794e36714af..0ad4e22f23f316 100644 --- a/regression-test/suites/fault_injection_p0/test_index_writer_file_cache.groovy +++ b/regression-test/suites/fault_injection_p0/test_index_writer_file_cache.groovy @@ -78,31 +78,7 @@ suite("test_index_writer_file_cache_fault_injection", "nonConcurrent") { sql """ INSERT INTO ${tableName} VALUES (893964672, '26.1.0.0', 'GET /images/hm_bg.jpg HTTP/1.0', 304, 0); """ def tablets = 
sql_return_maparray """ show tablets from ${tableName}; """ - - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - String backend_id = tablet.BackendId - def (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - assertEquals("success", compactJson.status.toLowerCase()) - } - - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - String backend_id = tablet.BackendId - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction(tableName, "full") } try { @@ -113,4 +89,4 @@ suite("test_index_writer_file_cache_fault_injection", "nonConcurrent") { } finally { GetDebugPoint().disableDebugPointForAllBEs("DorisFSDirectory::FSIndexOutput::init.file_cache") } -} \ No newline at end of file +} diff --git a/regression-test/suites/fault_injection_p0/test_skip_index_compaction_fault_injection.groovy b/regression-test/suites/fault_injection_p0/test_skip_index_compaction_fault_injection.groovy index cdc2bdda41716d..b63aeec2bbdc5e 100644 --- a/regression-test/suites/fault_injection_p0/test_skip_index_compaction_fault_injection.groovy +++ b/regression-test/suites/fault_injection_p0/test_skip_index_compaction_fault_injection.groovy @@ -68,7 +68,7 @@ suite("test_skip_index_compaction_fault_injection", "nonConcurrent") { """ boolean disableAutoCompaction = false - + def 
set_be_config = { key, value -> for (String backend_id: backendId_to_backendIP.keySet()) { def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value) @@ -76,49 +76,6 @@ suite("test_skip_index_compaction_fault_injection", "nonConcurrent") { } } - def trigger_full_compaction_on_tablets = { tablets -> - for (def tablet : tablets) { - String tablet_id = tablet.TabletId - String backend_id = tablet.BackendId - int times = 1 - - String compactionStatus; - do{ - def (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - ++times - sleep(2000) - compactionStatus = parseJson(out.trim()).status.toLowerCase(); - } while (compactionStatus!="success" && times<=10 && compactionStatus!="e-6010") - - - if (compactionStatus == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction && compactionStatus!="e-6010") { - assertEquals("success", compactionStatus) - } - } - } - - def wait_full_compaction_done = { tablets -> - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - String backend_id = tablet.BackendId - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - } - def get_rowset_count = { tablets -> int rowsetCount = 0 for (def tablet in tablets) { @@ -148,7 +105,7 @@ suite("test_skip_index_compaction_fault_injection", 
"nonConcurrent") { } } - def run_test = { tableName -> + def run_test = { tableName -> sql """ INSERT INTO ${tableName} VALUES (1, "40.135.0.0", "GET /images/hm_bg.jpg HTTP/1.0", 1, 2); """ sql """ INSERT INTO ${tableName} VALUES (2, "40.135.0.0", "GET /images/hm_bg.jpg HTTP/1.0", 1, 2); """ sql """ INSERT INTO ${tableName} VALUES (3, "40.135.0.0", "GET /images/hm_bg.jpg HTTP/1.0", 1, 2); """ @@ -178,15 +135,13 @@ suite("test_skip_index_compaction_fault_injection", "nonConcurrent") { assert (rowsetCount == 11 * replicaNum) // first - trigger_full_compaction_on_tablets.call(tablets) - wait_full_compaction_done.call(tablets) + trigger_and_wait_compaction(tableName, "full") rowsetCount = get_rowset_count.call(tablets); assert (rowsetCount == 11 * replicaNum) // second - trigger_full_compaction_on_tablets.call(tablets) - wait_full_compaction_done.call(tablets) + trigger_and_wait_compaction(tableName, "full") rowsetCount = get_rowset_count.call(tablets); if (isCloudMode) { @@ -202,7 +157,7 @@ suite("test_skip_index_compaction_fault_injection", "nonConcurrent") { String backend_id; backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) @@ -237,4 +192,4 @@ suite("test_skip_index_compaction_fault_injection", "nonConcurrent") { set_be_config.call("inverted_index_compaction_enable", invertedIndexCompactionEnable.toString()) } } -} \ No newline at end of file +} diff --git a/regression-test/suites/fault_injection_p0/test_variant_bloom_filter.groovy b/regression-test/suites/fault_injection_p0/test_variant_bloom_filter.groovy index 88c529d685dfe8..e1e1e051ec1beb 100644 --- a/regression-test/suites/fault_injection_p0/test_variant_bloom_filter.groovy +++ 
b/regression-test/suites/fault_injection_p0/test_variant_bloom_filter.groovy @@ -28,8 +28,8 @@ suite("test_variant_bloom_filter", "nonConcurrent") { table "${table_name}" // set http request header params - set 'read_json_by_line', 'true' - set 'format', 'json' + set 'read_json_by_line', 'true' + set 'format', 'json' set 'max_filter_ratio', '0.1' set 'memtable_on_sink_node', 'true' file file_name // import json file @@ -72,7 +72,7 @@ suite("test_variant_bloom_filter", "nonConcurrent") { getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); def tablets = sql_return_maparray """ show tablets from ${index_table}; """ - + for (def tablet in tablets) { int beforeSegmentCount = 0 String tablet_id = tablet.TabletId @@ -88,29 +88,7 @@ suite("test_variant_bloom_filter", "nonConcurrent") { } // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - assertEquals("success", compactJson.status.toLowerCase()) - } - - // wait for all compactions done - for (def tablet in tablets) { - Awaitility.await().atMost(3, TimeUnit.MINUTES).untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("compaction task for this tablet is not running", compactionStatus.msg.toLowerCase()) - return compactionStatus.run_status; - }); - } + 
trigger_and_wait_compaction(index_table, "full") for (def tablet in tablets) { int afterSegmentCount = 0 @@ -126,7 +104,7 @@ suite("test_variant_bloom_filter", "nonConcurrent") { } assertEquals(afterSegmentCount, 1) } - + try { GetDebugPoint().enableDebugPointForAllBEs("bloom_filter_must_filter_data") @@ -139,4 +117,4 @@ suite("test_variant_bloom_filter", "nonConcurrent") { } finally { GetDebugPoint().disableDebugPointForAllBEs("bloom_filter_must_filter_data") } -} \ No newline at end of file +} diff --git a/regression-test/suites/inverted_index_p0/array_contains/test_index_compaction_unique_keys_arr.groovy b/regression-test/suites/inverted_index_p0/array_contains/test_index_compaction_unique_keys_arr.groovy index 602e5d552598ea..3f7965c969fc4a 100644 --- a/regression-test/suites/inverted_index_p0/array_contains/test_index_compaction_unique_keys_arr.groovy +++ b/regression-test/suites/inverted_index_p0/array_contains/test_index_compaction_unique_keys_arr.groovy @@ -32,7 +32,7 @@ suite("test_index_compaction_unique_keys_arr", "array_contains_inverted_index, n getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); boolean disableAutoCompaction = false - + def set_be_config = { key, value -> for (String backend_id: backendId_to_backendIP.keySet()) { def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value) @@ -40,49 +40,6 @@ suite("test_index_compaction_unique_keys_arr", "array_contains_inverted_index, n } } - def trigger_full_compaction_on_tablets = { tablets -> - for (def tablet : tablets) { - String tablet_id = tablet.TabletId - String backend_id = tablet.BackendId - int times = 1 - - String compactionStatus; - do{ - def (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - ++times - sleep(2000) - 
compactionStatus = parseJson(out.trim()).status.toLowerCase(); - } while (compactionStatus!="success" && times<=10) - - - if (compactionStatus == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactionStatus) - } - } - } - - def wait_full_compaction_done = { tablets -> - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - String backend_id = tablet.BackendId - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - } - def get_rowset_count = { tablets -> int rowsetCount = 0 for (def tablet in tablets) { @@ -112,7 +69,7 @@ suite("test_index_compaction_unique_keys_arr", "array_contains_inverted_index, n } } - def run_test = { table_name -> + def run_test = { table_name -> sql """ INSERT INTO ${table_name} VALUES (1, "andy", "andy love apple", 100); """ sql """ INSERT INTO ${table_name} VALUES (1, "bason", "bason hate pear", 99); """ @@ -143,10 +100,7 @@ suite("test_index_compaction_unique_keys_arr", "array_contains_inverted_index, n assert (rowsetCount == 7 * replicaNum) // trigger full compactions for all tablets in ${table_name} - trigger_full_compaction_on_tablets.call(tablets) - - // wait for full compaction done - wait_full_compaction_done.call(tablets) + trigger_and_wait_compaction(table_name, "full") // after full compaction, there is only 1 rowset. 
rowsetCount = get_rowset_count.call(tablets); @@ -182,10 +136,7 @@ suite("test_index_compaction_unique_keys_arr", "array_contains_inverted_index, n } // trigger full compactions for all tablets in ${table_name} - trigger_full_compaction_on_tablets.call(tablets) - - // wait for full compaction done - wait_full_compaction_done.call(tablets) + trigger_and_wait_compaction(table_name, "full") // after full compaction, there is only 1 rowset. rowsetCount = get_rowset_count.call(tablets); @@ -207,7 +158,7 @@ suite("test_index_compaction_unique_keys_arr", "array_contains_inverted_index, n String backend_id; backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) @@ -243,7 +194,7 @@ suite("test_index_compaction_unique_keys_arr", "array_contains_inverted_index, n UNIQUE KEY(`id`) COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES ( + PROPERTIES ( "replication_num" = "1", "disable_auto_compaction" = "true", "enable_unique_key_merge_on_write" = "true" @@ -268,7 +219,7 @@ suite("test_index_compaction_unique_keys_arr", "array_contains_inverted_index, n CLUSTER BY (`score`) COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES ( + PROPERTIES ( "replication_num" = "1", "disable_auto_compaction" = "true", "enable_unique_key_merge_on_write" = "true" diff --git a/regression-test/suites/inverted_index_p0/array_contains/test_index_compaction_with_multi_index_segments_arr.groovy b/regression-test/suites/inverted_index_p0/array_contains/test_index_compaction_with_multi_index_segments_arr.groovy index 4ac4f0e9794517..f2f24eec2a97d6 100644 --- a/regression-test/suites/inverted_index_p0/array_contains/test_index_compaction_with_multi_index_segments_arr.groovy +++ 
b/regression-test/suites/inverted_index_p0/array_contains/test_index_compaction_with_multi_index_segments_arr.groovy @@ -33,7 +33,7 @@ suite("test_index_compaction_with_multi_index_segments_arr", "nonConcurrent") { getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); boolean disableAutoCompaction = false - + def set_be_config = { key, value -> for (String backend_id: backendId_to_backendIP.keySet()) { def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value) @@ -41,49 +41,6 @@ suite("test_index_compaction_with_multi_index_segments_arr", "nonConcurrent") { } } - def trigger_full_compaction_on_tablets = { tablets -> - for (def tablet : tablets) { - String tablet_id = tablet.TabletId - String backend_id = tablet.BackendId - int times = 1 - - String compactionStatus; - do{ - def (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - ++times - sleep(2000) - compactionStatus = parseJson(out.trim()).status.toLowerCase(); - } while (compactionStatus!="success" && times<=10) - - - if (compactionStatus == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactionStatus) - } - } - } - - def wait_full_compaction_done = { tablets -> - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - String backend_id = tablet.BackendId - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - 
assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - } - def get_rowset_count = { tablets -> int rowsetCount = 0 for (def tablet in tablets) { @@ -120,7 +77,7 @@ suite("test_index_compaction_with_multi_index_segments_arr", "nonConcurrent") { try { String backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) @@ -217,10 +174,7 @@ suite("test_index_compaction_with_multi_index_segments_arr", "nonConcurrent") { assert (rowsetCount == 3 * replicaNum) // trigger full compactions for all tablets in ${tableName} - trigger_full_compaction_on_tablets.call(tablets) - - // wait for full compaction done - wait_full_compaction_done.call(tablets) + trigger_and_wait_compaction(tableName, "full") // after full compaction, there is only 1 rowset. rowsetCount = get_rowset_count.call(tablets) @@ -259,10 +213,7 @@ suite("test_index_compaction_with_multi_index_segments_arr", "nonConcurrent") { assert (rowsetCount == 2 * replicaNum) } // trigger full compactions for all tablets in ${tableName} - trigger_full_compaction_on_tablets.call(tablets) - - // wait for full compaction done - wait_full_compaction_done.call(tablets) + trigger_and_wait_compaction(tableName, "full") // after full compaction, there is only 1 rowset. 
rowsetCount = get_rowset_count.call(tablets) @@ -341,10 +292,7 @@ suite("test_index_compaction_with_multi_index_segments_arr", "nonConcurrent") { assert (rowsetCount == 3 * replicaNum) // trigger full compactions for all tablets in ${tableName} - trigger_full_compaction_on_tablets.call(tablets) - - // wait for full compaction done - wait_full_compaction_done.call(tablets) + trigger_and_wait_compaction(tableName, "full") // after full compaction, there is only 1 rowset. rowsetCount = get_rowset_count.call(tablets) @@ -370,7 +318,7 @@ suite("test_index_compaction_with_multi_index_segments_arr", "nonConcurrent") { ("2018-02-21 19:00:00", [8], ["I\'m using the builds"]), ("2018-02-21 20:00:00", [9], ["I\'m using the builds"]), ("2018-02-21 21:00:00", [10], ["I\'m using the builds"]); """ - + sql """ select * from ${tableName} """ tablets = sql_return_maparray """ show tablets from ${tableName}; """ @@ -383,10 +331,7 @@ suite("test_index_compaction_with_multi_index_segments_arr", "nonConcurrent") { assert (rowsetCount == 2 * replicaNum) } // trigger full compactions for all tablets in ${tableName} - trigger_full_compaction_on_tablets.call(tablets) - - // wait for full compaction done - wait_full_compaction_done.call(tablets) + trigger_and_wait_compaction(tableName, "full") // after full compaction, there is only 1 rowset. 
rowsetCount = get_rowset_count.call(tablets) diff --git a/regression-test/suites/inverted_index_p0/index_change/test_index_change_with_cumulative_compaction.groovy b/regression-test/suites/inverted_index_p0/index_change/test_index_change_with_cumulative_compaction.groovy index 731db243bfcf19..27789533420d2f 100644 --- a/regression-test/suites/inverted_index_p0/index_change/test_index_change_with_cumulative_compaction.groovy +++ b/regression-test/suites/inverted_index_p0/index_change/test_index_change_with_cumulative_compaction.groovy @@ -142,36 +142,6 @@ suite("test_index_change_with_cumulative_compaction", "nonConcurrent") { sql """ CREATE INDEX idx_city ON ${tableName}(`city`) USING INVERTED """ wait_for_latest_op_on_table_finish(tableName, timeout) - // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - StringBuilder sb = new StringBuilder(); - sb.append("curl -X POST http://") - sb.append(backendId_to_backendIP.get(backend_id)) - sb.append(":") - sb.append(backendId_to_backendHttpPort.get(backend_id)) - sb.append("/api/compaction/run?tablet_id=") - sb.append(tablet_id) - sb.append("&compact_type=cumulative") - - String command = sb.toString() - process = command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - out = process.getText() - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - // build index if (!isCloudMode()) { sql "build index idx_user_id on ${tableName}" @@ -179,34 +149,8 @@ 
suite("test_index_change_with_cumulative_compaction", "nonConcurrent") { sql "build index idx_city on ${tableName}" } - // wait for all compactions done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - StringBuilder sb = new StringBuilder(); - sb.append("curl -X GET http://") - sb.append(backendId_to_backendIP.get(backend_id)) - sb.append(":") - sb.append(backendId_to_backendHttpPort.get(backend_id)) - sb.append("/api/compaction/run_status?tablet_id=") - sb.append(tablet_id) - - String command = sb.toString() - logger.info(command) - process = command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - out = process.getText() - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + // trigger compactions for all tablets in ${tableName} + trigger_and_wait_compaction(tableName, "cumulative") int rowCount = 0 for (def tablet in tablets) { diff --git a/regression-test/suites/inverted_index_p0/index_change/test_index_change_with_full_compaction.groovy b/regression-test/suites/inverted_index_p0/index_change/test_index_change_with_full_compaction.groovy index f8d807ffb563d9..ff485a71ec010e 100644 --- a/regression-test/suites/inverted_index_p0/index_change/test_index_change_with_full_compaction.groovy +++ b/regression-test/suites/inverted_index_p0/index_change/test_index_change_with_full_compaction.groovy @@ -142,36 +142,6 @@ suite("test_index_change_with_full_compaction") { sql """ CREATE INDEX idx_city ON ${tableName}(`city`) USING INVERTED """ wait_for_latest_op_on_table_finish(tableName, timeout) - // trigger compactions for all tablets in 
${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - StringBuilder sb = new StringBuilder(); - sb.append("curl -X POST http://") - sb.append(backendId_to_backendIP.get(backend_id)) - sb.append(":") - sb.append(backendId_to_backendHttpPort.get(backend_id)) - sb.append("/api/compaction/run?tablet_id=") - sb.append(tablet_id) - sb.append("&compact_type=full") - - String command = sb.toString() - process = command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - out = process.getText() - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - // build index if (!isCloudMode()) { sql "build index idx_user_id on ${tableName}" @@ -179,34 +149,8 @@ suite("test_index_change_with_full_compaction") { sql "build index idx_city on ${tableName}" } - // wait for all compactions done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - StringBuilder sb = new StringBuilder(); - sb.append("curl -X GET http://") - sb.append(backendId_to_backendIP.get(backend_id)) - sb.append(":") - sb.append(backendId_to_backendHttpPort.get(backend_id)) - sb.append("/api/compaction/run_status?tablet_id=") - sb.append(tablet_id) - - String command = sb.toString() - logger.info(command) - process = command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - out = process.getText() - logger.info("Get compaction status: code=" + 
code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + // trigger compactions for all tablets in ${tableName} + trigger_and_wait_compaction(tableName, "full") int rowCount = 0 for (def tablet in tablets) { diff --git a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_dup_keys.groovy b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_dup_keys.groovy index ce6a7e7c6a355c..c5617e7d6c8be1 100644 --- a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_dup_keys.groovy +++ b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_dup_keys.groovy @@ -26,7 +26,7 @@ suite("test_index_compaction_dup_keys", "nonConcurrent") { sql """ set global enable_match_without_inverted_index = false """ boolean disableAutoCompaction = false - + def set_be_config = { key, value -> for (String backend_id: backendId_to_backendIP.keySet()) { def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value) @@ -34,49 +34,6 @@ suite("test_index_compaction_dup_keys", "nonConcurrent") { } } - def trigger_full_compaction_on_tablets = { tablets -> - for (def tablet : tablets) { - String tablet_id = tablet.TabletId - String backend_id = tablet.BackendId - int times = 1 - - String compactionStatus; - do{ - def (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - ++times - sleep(2000) - compactionStatus = parseJson(out.trim()).status.toLowerCase(); - } while (compactionStatus!="success" && times<=10) - - - if (compactionStatus == "fail") { - 
assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactionStatus) - } - } - } - - def wait_full_compaction_done = { tablets -> - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - String backend_id = tablet.BackendId - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - } - def get_rowset_count = { tablets -> int rowsetCount = 0 for (def tablet in tablets) { @@ -112,7 +69,7 @@ suite("test_index_compaction_dup_keys", "nonConcurrent") { String backend_id; backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) @@ -181,10 +138,7 @@ suite("test_index_compaction_dup_keys", "nonConcurrent") { assert (rowsetCount == 7 * replicaNum) // trigger full compactions for all tablets in ${tableName} - trigger_full_compaction_on_tablets.call(tablets) - - // wait for full compaction done - wait_full_compaction_done.call(tablets) + trigger_and_wait_compaction(tableName, "full") // after full compaction, there is only 1 rowset. 
rowsetCount = get_rowset_count.call(tablets); @@ -219,10 +173,7 @@ suite("test_index_compaction_dup_keys", "nonConcurrent") { assert (rowsetCount == 7 * replicaNum) } // trigger full compactions for all tablets in ${tableName} - trigger_full_compaction_on_tablets.call(tablets) - - // wait for full compaction done - wait_full_compaction_done.call(tablets) + trigger_and_wait_compaction(tableName, "full") // after full compaction, there is only 1 rowset. rowsetCount = get_rowset_count.call(tablets); diff --git a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_empty_segments.groovy b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_empty_segments.groovy index 1c70c9e8e5027b..1f858d9de9616a 100644 --- a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_empty_segments.groovy +++ b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_empty_segments.groovy @@ -34,7 +34,7 @@ suite("test_index_compaction_empty_segments", "p0, nonConcurrent") { set_be_config.call("inverted_index_compaction_enable", "true") sql "DROP TABLE IF EXISTS ${compaction_table_name}" - sql """ + sql """ CREATE TABLE ${compaction_table_name} ( `k` int(11) NULL, `v` varchar(20) NULL, @@ -58,31 +58,8 @@ suite("test_index_compaction_empty_segments", "p0, nonConcurrent") { def tablets = sql_return_maparray """ show tablets from ${compaction_table_name}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - assertEquals("success", compactJson.status.toLowerCase()) - } - - // wait for all compactions 
done - for (def tablet in tablets) { - Awaitility.await().atMost(10, TimeUnit.MINUTES).untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("compaction task for this tablet is not running", compactionStatus.msg.toLowerCase()) - return compactionStatus.run_status; - }); - } + trigger_and_wait_compaction(compaction_table_name, "full") - for (def tablet in tablets) { int afterSegmentCount = 0 String tablet_id = tablet.TabletId diff --git a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_p0.groovy b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_p0.groovy index e178d08baadd75..e88e1fcc30326b 100644 --- a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_p0.groovy +++ b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_p0.groovy @@ -29,8 +29,8 @@ suite("test_index_compaction_p0", "p0, nonConcurrent") { table "${table_name}" // set http request header params - set 'read_json_by_line', 'true' - set 'format', 'json' + set 'read_json_by_line', 'true' + set 'format', 'json' set 'max_filter_ratio', '0.1' file file_name // import json file time 10000 // limit inflight 10s @@ -51,7 +51,7 @@ suite("test_index_compaction_p0", "p0, nonConcurrent") { } } sql "DROP TABLE IF EXISTS ${compaction_table_name}" - sql """ + sql """ CREATE TABLE ${compaction_table_name} ( `@timestamp` int(11) NULL, `clientip` varchar(20) NULL, @@ -90,7 +90,7 @@ suite("test_index_compaction_p0", "p0, nonConcurrent") { 
//TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus def tablets = sql_return_maparray """ show tablets from ${compaction_table_name}; """ - + for (def tablet in tablets) { int beforeSegmentCount = 0 String tablet_id = tablet.TabletId @@ -106,31 +106,7 @@ suite("test_index_compaction_p0", "p0, nonConcurrent") { } // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - assertEquals("success", compactJson.status.toLowerCase()) - } - - // wait for all compactions done - for (def tablet in tablets) { - Awaitility.await().atMost(1, TimeUnit.MINUTES).untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("compaction task for this tablet is not running", compactionStatus.msg.toLowerCase()) - return compactionStatus.run_status; - }); - } - - + trigger_and_wait_compaction(compaction_table_name, "full") for (def tablet in tablets) { int afterSegmentCount = 0 String tablet_id = tablet.TabletId @@ -145,5 +121,5 @@ suite("test_index_compaction_p0", "p0, nonConcurrent") { } assertEquals(afterSegmentCount, 1) } - + } diff --git 
a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_unique_keys.groovy b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_unique_keys.groovy index 87996687b93950..688a0dab144058 100644 --- a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_unique_keys.groovy +++ b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_unique_keys.groovy @@ -26,7 +26,7 @@ suite("test_index_compaction_unique_keys", "nonConcurrent") { sql """ set global enable_match_without_inverted_index = false """ boolean disableAutoCompaction = false - + def set_be_config = { key, value -> for (String backend_id: backendId_to_backendIP.keySet()) { def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value) @@ -34,49 +34,6 @@ suite("test_index_compaction_unique_keys", "nonConcurrent") { } } - def trigger_full_compaction_on_tablets = { tablets -> - for (def tablet : tablets) { - String tablet_id = tablet.TabletId - String backend_id = tablet.BackendId - int times = 1 - - String compactionStatus; - do{ - def (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - ++times - sleep(2000) - compactionStatus = parseJson(out.trim()).status.toLowerCase(); - } while (compactionStatus!="success" && times<=10) - - - if (compactionStatus == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactionStatus) - } - } - } - - def wait_full_compaction_done = { tablets -> - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - String backend_id = tablet.BackendId - def (code, out, 
err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - } - def get_rowset_count = { tablets -> int rowsetCount = 0 for (def tablet in tablets) { @@ -112,7 +69,7 @@ suite("test_index_compaction_unique_keys", "nonConcurrent") { String backend_id; backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) @@ -148,7 +105,7 @@ suite("test_index_compaction_unique_keys", "nonConcurrent") { UNIQUE KEY(`id`) COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES ( + PROPERTIES ( "replication_num" = "1", "disable_auto_compaction" = "true", "enable_unique_key_merge_on_write" = "true", @@ -186,10 +143,7 @@ suite("test_index_compaction_unique_keys", "nonConcurrent") { assert (rowsetCount == 7 * replicaNum) // trigger full compactions for all tablets in ${tableName} - trigger_full_compaction_on_tablets.call(tablets) - - // wait for full compaction done - wait_full_compaction_done.call(tablets) + trigger_and_wait_compaction(tableName, "full") // after full compaction, there is only 1 rowset. 
rowsetCount = get_rowset_count.call(tablets); @@ -225,10 +179,7 @@ suite("test_index_compaction_unique_keys", "nonConcurrent") { } // trigger full compactions for all tablets in ${tableName} - trigger_full_compaction_on_tablets.call(tablets) - - // wait for full compaction done - wait_full_compaction_done.call(tablets) + trigger_and_wait_compaction(tableName, "full") // after full compaction, there is only 1 rowset. rowsetCount = get_rowset_count.call(tablets); diff --git a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_with_multi_index_segments.groovy b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_with_multi_index_segments.groovy index edcf41db13c877..5d9c53ccfb23d7 100644 --- a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_with_multi_index_segments.groovy +++ b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_with_multi_index_segments.groovy @@ -26,7 +26,7 @@ suite("test_index_compaction_with_multi_index_segments", "nonConcurrent") { sql """ set global enable_match_without_inverted_index = false """ boolean disableAutoCompaction = false - + def set_be_config = { key, value -> for (String backend_id: backendId_to_backendIP.keySet()) { def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value) @@ -34,49 +34,6 @@ suite("test_index_compaction_with_multi_index_segments", "nonConcurrent") { } } - def trigger_full_compaction_on_tablets = { tablets -> - for (def tablet : tablets) { - String tablet_id = tablet.TabletId - String backend_id = tablet.BackendId - int times = 1 - - String compactionStatus; - do{ - def (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - ++times - sleep(2000) - 
compactionStatus = parseJson(out.trim()).status.toLowerCase(); - } while (compactionStatus!="success" && times<=10) - - - if (compactionStatus == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactionStatus) - } - } - } - - def wait_full_compaction_done = { tablets -> - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - String backend_id = tablet.BackendId - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - } - def get_rowset_count = { tablets -> int rowsetCount = 0 for (def tablet in tablets) { @@ -113,7 +70,7 @@ suite("test_index_compaction_with_multi_index_segments", "nonConcurrent") { try { String backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) @@ -212,10 +169,7 @@ suite("test_index_compaction_with_multi_index_segments", "nonConcurrent") { assert (rowsetCount == 3 * replicaNum) // trigger full compactions for all tablets in ${tableName} - trigger_full_compaction_on_tablets.call(tablets) - - // wait for full compaction done - wait_full_compaction_done.call(tablets) + trigger_and_wait_compaction(tableName, "full") // after full compaction, there is only 1 rowset. 
rowsetCount = get_rowset_count.call(tablets) @@ -254,10 +208,7 @@ suite("test_index_compaction_with_multi_index_segments", "nonConcurrent") { assert (rowsetCount == 2 * replicaNum) } // trigger full compactions for all tablets in ${tableName} - trigger_full_compaction_on_tablets.call(tablets) - - // wait for full compaction done - wait_full_compaction_done.call(tablets) + trigger_and_wait_compaction(tableName, "full") // after full compaction, there is only 1 rowset. rowsetCount = get_rowset_count.call(tablets) @@ -337,11 +288,7 @@ suite("test_index_compaction_with_multi_index_segments", "nonConcurrent") { assert (rowsetCount == 3 * replicaNum) // trigger full compactions for all tablets in ${tableName} - trigger_full_compaction_on_tablets.call(tablets) - - // wait for full compaction done - wait_full_compaction_done.call(tablets) - + trigger_and_wait_compaction.call(tableName, "full") // after full compaction, there is only 1 rowset. rowsetCount = get_rowset_count.call(tablets) if (isCloudMode) { @@ -366,7 +313,7 @@ suite("test_index_compaction_with_multi_index_segments", "nonConcurrent") { ("2018-02-21 19:00:00", 8, "I\'m using the builds"), ("2018-02-21 20:00:00", 9, "I\'m using the builds"), ("2018-02-21 21:00:00", 10, "I\'m using the builds"); """ - + sql """ select * from ${tableName} """ tablets = sql_return_maparray """ show tablets from ${tableName}; """ @@ -379,10 +326,7 @@ suite("test_index_compaction_with_multi_index_segments", "nonConcurrent") { assert (rowsetCount == 2 * replicaNum) } // trigger full compactions for all tablets in ${tableName} - trigger_full_compaction_on_tablets.call(tablets) - - // wait for full compaction done - wait_full_compaction_done.call(tablets) + trigger_and_wait_compaction(tableName, "full") // after full compaction, there is only 1 rowset. 
rowsetCount = get_rowset_count.call(tablets) diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_cumulative_compaction_with_format_v2.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_cumulative_compaction_with_format_v2.groovy index 003ac3c4c30fdb..eff05b1fd8daa4 100644 --- a/regression-test/suites/inverted_index_p0/index_format_v2/test_cumulative_compaction_with_format_v2.groovy +++ b/regression-test/suites/inverted_index_p0/index_format_v2/test_cumulative_compaction_with_format_v2.groovy @@ -40,7 +40,7 @@ suite("test_cumulative_compaction_with_format_v2", "inverted_index_format_v2") { assertTrue(useTime <= OpTimeout, "wait_for_latest_op_on_table_finish timeout") } - def calc_segment_count = { tablet -> + def calc_segment_count = { tablet -> int segment_count = 0 String tablet_id = tablet.TabletId StringBuilder sb = new StringBuilder(); @@ -163,69 +163,11 @@ suite("test_cumulative_compaction_with_format_v2", "inverted_index_format_v2") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - String ip = backendId_to_backendIP.get(backend_id) - String port = backendId_to_backendHttpPort.get(backend_id) - int segment_count = calc_segment_count(tablet) - logger.info("TabletId: " + tablet_id + ", segment_count: " + segment_count) - check_nested_index_file(ip, port, tablet_id, 9, 3, "V2") - - StringBuilder sb = new StringBuilder(); - sb.append("curl -X POST http://") - sb.append(backendId_to_backendIP.get(backend_id)) - sb.append(":") - sb.append(backendId_to_backendHttpPort.get(backend_id)) - sb.append("/api/compaction/run?tablet_id=") - sb.append(tablet_id) - sb.append("&compact_type=cumulative") - - String command = sb.toString() - process = command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream()))); - out = process.getText() - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - + trigger_and_wait_compaction(tableName, "cumulative") // wait for all compactions done for (def tablet in tablets) { - boolean running = true String tablet_id = tablet.TabletId backend_id = tablet.BackendId - do { - Thread.sleep(1000) - StringBuilder sb = new StringBuilder(); - sb.append("curl -X GET http://") - sb.append(backendId_to_backendIP.get(backend_id)) - sb.append(":") - sb.append(backendId_to_backendHttpPort.get(backend_id)) - sb.append("/api/compaction/run_status?tablet_id=") - sb.append(tablet_id) - - String command = sb.toString() - logger.info(command) - process = command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - out = process.getText() - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - String ip = backendId_to_backendIP.get(backend_id) String port = backendId_to_backendHttpPort.get(backend_id) int segment_count = calc_segment_count(tablet) diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_mor_table_with_format_v2.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_mor_table_with_format_v2.groovy index ed19dd75d1f617..61aadbcd0bb102 100644 --- 
a/regression-test/suites/inverted_index_p0/index_format_v2/test_mor_table_with_format_v2.groovy +++ b/regression-test/suites/inverted_index_p0/index_format_v2/test_mor_table_with_format_v2.groovy @@ -40,7 +40,7 @@ suite("test_mor_table_with_format_v2", "inverted_index_format_v2") { assertTrue(useTime <= OpTimeout, "wait_for_latest_op_on_table_finish timeout") } - def calc_segment_count = { tablet -> + def calc_segment_count = { tablet -> int segment_count = 0 String tablet_id = tablet.TabletId StringBuilder sb = new StringBuilder(); @@ -165,69 +165,12 @@ suite("test_mor_table_with_format_v2", "inverted_index_format_v2") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - String ip = backendId_to_backendIP.get(backend_id) - String port = backendId_to_backendHttpPort.get(backend_id) - int segment_count = calc_segment_count(tablet) - logger.info("TabletId: " + tablet_id + ", segment_count: " + segment_count) - check_nested_index_file(ip, port, tablet_id, 10, 3, "V2") - - StringBuilder sb = new StringBuilder(); - sb.append("curl -X POST http://") - sb.append(backendId_to_backendIP.get(backend_id)) - sb.append(":") - sb.append(backendId_to_backendHttpPort.get(backend_id)) - sb.append("/api/compaction/run?tablet_id=") - sb.append(tablet_id) - sb.append("&compact_type=full") - - String command = sb.toString() - process = command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - out = process.getText() - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if 
(disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } + trigger_and_wait_compaction(tableName, "full") // wait for all compactions done for (def tablet in tablets) { - boolean running = true String tablet_id = tablet.TabletId backend_id = tablet.BackendId - do { - Thread.sleep(1000) - StringBuilder sb = new StringBuilder(); - sb.append("curl -X GET http://") - sb.append(backendId_to_backendIP.get(backend_id)) - sb.append(":") - sb.append(backendId_to_backendHttpPort.get(backend_id)) - sb.append("/api/compaction/run_status?tablet_id=") - sb.append(tablet_id) - - String command = sb.toString() - logger.info(command) - process = command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - out = process.getText() - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - String ip = backendId_to_backendIP.get(backend_id) String port = backendId_to_backendHttpPort.get(backend_id) // after compaction, there are 1 rwoset in local mode and 2 rowsets in cloud mode. 
diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_mow_table_with_format_v2.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_mow_table_with_format_v2.groovy index 750d5fd2e690c2..65827d038bde17 100644 --- a/regression-test/suites/inverted_index_p0/index_format_v2/test_mow_table_with_format_v2.groovy +++ b/regression-test/suites/inverted_index_p0/index_format_v2/test_mow_table_with_format_v2.groovy @@ -40,7 +40,7 @@ suite("test_mow_table_with_format_v2", "inverted_index_format_v2") { assertTrue(useTime <= OpTimeout, "wait_for_latest_op_on_table_finish timeout") } - def calc_segment_count = { tablet -> + def calc_segment_count = { tablet -> int segment_count = 0 String tablet_id = tablet.TabletId StringBuilder sb = new StringBuilder(); @@ -163,69 +163,12 @@ suite("test_mow_table_with_format_v2", "inverted_index_format_v2") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - String ip = backendId_to_backendIP.get(backend_id) - String port = backendId_to_backendHttpPort.get(backend_id) - int segment_count = calc_segment_count(tablet) - logger.info("TabletId: " + tablet_id + ", segment_count: " + segment_count) - check_nested_index_file(ip, port, tablet_id, 9, 3, "V2") - - StringBuilder sb = new StringBuilder(); - sb.append("curl -X POST http://") - sb.append(backendId_to_backendIP.get(backend_id)) - sb.append(":") - sb.append(backendId_to_backendHttpPort.get(backend_id)) - sb.append("/api/compaction/run?tablet_id=") - sb.append(tablet_id) - sb.append("&compact_type=cumulative") - - String command = sb.toString() - process = command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - out = process.getText() - logger.info("Run compaction: code=" + code + ", 
out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done + trigger_and_wait_compaction(tableName, "cumulative") + // check indexes for (def tablet in tablets) { boolean running = true String tablet_id = tablet.TabletId backend_id = tablet.BackendId - do { - Thread.sleep(1000) - StringBuilder sb = new StringBuilder(); - sb.append("curl -X GET http://") - sb.append(backendId_to_backendIP.get(backend_id)) - sb.append(":") - sb.append(backendId_to_backendHttpPort.get(backend_id)) - sb.append("/api/compaction/run_status?tablet_id=") - sb.append(tablet_id) - - String command = sb.toString() - logger.info(command) - process = command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - out = process.getText() - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - String ip = backendId_to_backendIP.get(backend_id) String port = backendId_to_backendHttpPort.get(backend_id) check_nested_index_file(ip, port, tablet_id, 2, 3, "V2") diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_single_replica_compaction_with_format_v2.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_single_replica_compaction_with_format_v2.groovy index 25924937a66282..75bc7bd456f499 100644 --- a/regression-test/suites/inverted_index_p0/index_format_v2/test_single_replica_compaction_with_format_v2.groovy +++ 
b/regression-test/suites/inverted_index_p0/index_format_v2/test_single_replica_compaction_with_format_v2.groovy @@ -46,7 +46,7 @@ suite("test_single_replica_compaction_with_format_v2", "inverted_index_format_v2 assertTrue(useTime <= OpTimeout, "wait_for_latest_op_on_table_finish timeout") } - def calc_segment_count = { tablet -> + def calc_segment_count = { tablet -> int segment_count = 0 String tablet_id = tablet.TabletId StringBuilder sb = new StringBuilder(); @@ -167,67 +167,10 @@ suite("test_single_replica_compaction_with_format_v2", "inverted_index_format_v2 def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} + trigger_and_wait_compaction(tableName, "cumulative") for (def tablet in tablets) { String tablet_id = tablet.TabletId backend_id = tablet.BackendId - String ip = backendId_to_backendIP.get(backend_id) - String port = backendId_to_backendHttpPort.get(backend_id) - check_nested_index_file(ip, port, tablet_id, 9, 3, "V2") - - StringBuilder sb = new StringBuilder(); - sb.append("curl -X POST http://") - sb.append(backendId_to_backendIP.get(backend_id)) - sb.append(":") - sb.append(backendId_to_backendHttpPort.get(backend_id)) - sb.append("/api/compaction/run?tablet_id=") - sb.append(tablet_id) - sb.append("&compact_type=cumulative") - - String command = sb.toString() - process = command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - out = process.getText() - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for 
(def tablet in tablets) { - boolean running = true - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - do { - Thread.sleep(1000) - StringBuilder sb = new StringBuilder(); - sb.append("curl -X GET http://") - sb.append(backendId_to_backendIP.get(backend_id)) - sb.append(":") - sb.append(backendId_to_backendHttpPort.get(backend_id)) - sb.append("/api/compaction/run_status?tablet_id=") - sb.append(tablet_id) - - String command = sb.toString() - logger.info(command) - process = command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - out = process.getText() - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - String ip = backendId_to_backendIP.get(backend_id) String port = backendId_to_backendHttpPort.get(backend_id) check_nested_index_file(ip, port, tablet_id, 2, 3, "V2") diff --git a/regression-test/suites/inverted_index_p0/test_inverted_index_file_size.groovy b/regression-test/suites/inverted_index_p0/test_inverted_index_file_size.groovy index a2748cb93c9413..73446089bb209c 100644 --- a/regression-test/suites/inverted_index_p0/test_inverted_index_file_size.groovy +++ b/regression-test/suites/inverted_index_p0/test_inverted_index_file_size.groovy @@ -35,7 +35,7 @@ suite("test_inverted_index_file_size", "nonConcurrent"){ // load the json data streamLoad { table "${tableName}" - + set 'read_json_by_line', 'true' set 'format', 'json' file 'documents-1000.json' // import json file @@ -55,42 +55,6 @@ suite("test_inverted_index_file_size", "nonConcurrent"){ } } - def run_compaction_and_wait = { - 
//TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus - def tablets = sql_return_maparray """ show tablets from ${tableName}; """ - - // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - logger.info("Compaction was done automatically!") - } else { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - } - def test_table = { format -> sql "DROP TABLE IF EXISTS ${tableName}" sql """ @@ -123,7 +87,7 @@ suite("test_inverted_index_file_size", "nonConcurrent"){ qt_sql """ select count() from ${tableName} where clientip match '17.0.0.0' and request match 'GET' and status match '200' and size > 200 """ qt_sql """ select count() from ${tableName} where clientip match_phrase '17.0.0.0' and request match_phrase 'GET' and status match 
'200' and size > 200 """ - run_compaction_and_wait.call() + trigger_and_wait_compaction.call(tableName, "full") qt_sql """ select count() from ${tableName} where clientip match '17.0.0.0' and request match 'GET' and status match '200' and size > 200 """ qt_sql """ select count() from ${tableName} where clientip match_phrase '17.0.0.0' and request match_phrase 'GET' and status match '200' and size > 200 """ @@ -141,5 +105,5 @@ suite("test_inverted_index_file_size", "nonConcurrent"){ GetDebugPoint().disableDebugPointForAllBEs("file_size_not_in_rowset_meta") set_be_config.call("inverted_index_compaction_enable", "true") } - -} \ No newline at end of file + +} diff --git a/regression-test/suites/inverted_index_p0/test_inverted_index_v2_file_size.groovy b/regression-test/suites/inverted_index_p0/test_inverted_index_v2_file_size.groovy index 5e22a7ae89270a..e9ab66bbad40b1 100644 --- a/regression-test/suites/inverted_index_p0/test_inverted_index_v2_file_size.groovy +++ b/regression-test/suites/inverted_index_p0/test_inverted_index_v2_file_size.groovy @@ -23,7 +23,7 @@ suite("test_index_index_V2_file_size", "nonConcurrent") { def backendId_to_backendIP = [:] def backendId_to_backendHttpPort = [:] getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - + def set_be_config = { key, value -> for (String backend_id: backendId_to_backendIP.keySet()) { def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value) @@ -31,45 +31,6 @@ suite("test_index_index_V2_file_size", "nonConcurrent") { } } - def trigger_full_compaction_on_tablets = { tablets -> - for (def tablet : tablets) { - String tablet_id = tablet.TabletId - String backend_id = tablet.BackendId - int times = 1 - - String compactionStatus; - do{ - def (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" 
+ code + ", out=" + out + ", err=" + err) - ++times - sleep(2000) - compactionStatus = parseJson(out.trim()).status.toLowerCase(); - } while (compactionStatus!="success" && times<=10) - - - if (compactionStatus == "fail") { - logger.info("Compaction was done automatically!") - } - } - } - - def wait_full_compaction_done = { tablets -> - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - String backend_id = tablet.BackendId - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - } - def get_rowset_count = { tablets -> int rowsetCount = 0 for (def tablet in tablets) { @@ -88,7 +49,7 @@ suite("test_index_index_V2_file_size", "nonConcurrent") { String backend_id; backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) @@ -139,10 +100,7 @@ suite("test_index_index_V2_file_size", "nonConcurrent") { qt_sql """ select * from ${tableName} where score < 100 order by id, name, hobbies, score """ // trigger full compactions for all tablets in ${tableName} - trigger_full_compaction_on_tablets.call(tablets) - - // wait for full compaction done - wait_full_compaction_done.call(tablets) + trigger_and_wait_compaction.call(tableName, "full") def dedup_tablets = deduplicate_tablets(tablets) @@ -177,10 +135,7 @@ suite("test_index_index_V2_file_size", "nonConcurrent") { 
set_be_config.call("inverted_index_compaction_enable", "false") // trigger full compactions for all tablets in ${tableName} - trigger_full_compaction_on_tablets.call(tablets) - - // wait for full compaction done - wait_full_compaction_done.call(tablets) + trigger_and_wait_compaction.call(tableName, "full") // after full compaction, there is only 1 rowset. count = get_rowset_count.call(tablets); diff --git a/regression-test/suites/inverted_index_p1/index_compaction/test_index_compaction_p1.groovy b/regression-test/suites/inverted_index_p1/index_compaction/test_index_compaction_p1.groovy index c55ae3c233bec0..176a1dbd8d89d8 100644 --- a/regression-test/suites/inverted_index_p1/index_compaction/test_index_compaction_p1.groovy +++ b/regression-test/suites/inverted_index_p1/index_compaction/test_index_compaction_p1.groovy @@ -29,8 +29,8 @@ suite("test_index_compaction_p1", "p1, nonConcurrent") { table "${table_name}" // set http request header params - set 'read_json_by_line', 'true' - set 'format', 'json' + set 'read_json_by_line', 'true' + set 'format', 'json' set 'max_filter_ratio', '0.1' file file_name // import json file time 10000 // limit inflight 10s @@ -51,7 +51,7 @@ suite("test_index_compaction_p1", "p1, nonConcurrent") { } } sql "DROP TABLE IF EXISTS ${compaction_table_name}" - sql """ + sql """ CREATE TABLE ${compaction_table_name} ( `@timestamp` int(11) NULL, `clientip` varchar(20) NULL, @@ -95,7 +95,7 @@ suite("test_index_compaction_p1", "p1, nonConcurrent") { //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus def tablets = sql_return_maparray """ show tablets from ${compaction_table_name}; """ - + for (def tablet in tablets) { int beforeSegmentCount = 0 String tablet_id = tablet.TabletId @@ -111,30 +111,7 @@ suite("test_index_compaction_p1", "p1, nonConcurrent") { } // 
trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - assertEquals("success", compactJson.status.toLowerCase()) - } - - // wait for all compactions done - for (def tablet in tablets) { - Awaitility.await().atMost(10, TimeUnit.MINUTES).untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("compaction task for this tablet is not running", compactionStatus.msg.toLowerCase()) - return compactionStatus.run_status; - }); - } - + trigger_and_wait_compaction(compaction_table_name, "full") for (def tablet in tablets) { int afterSegmentCount = 0 String tablet_id = tablet.TabletId diff --git a/regression-test/suites/inverted_index_p1/need_read_data/test_dup_table_inverted_index.groovy b/regression-test/suites/inverted_index_p1/need_read_data/test_dup_table_inverted_index.groovy index 05d75cc540c357..b7e83468538c91 100644 --- a/regression-test/suites/inverted_index_p1/need_read_data/test_dup_table_inverted_index.groovy +++ b/regression-test/suites/inverted_index_p1/need_read_data/test_dup_table_inverted_index.groovy @@ -16,7 +16,7 @@ // under the License. 
suite("test_dup_table_inverted_index", "p1") { - + // load data def load_data = { loadTableName, fileName -> streamLoad { @@ -59,51 +59,6 @@ suite("test_dup_table_inverted_index", "p1") { } } - def run_compaction = { compactionTableName -> - String backend_id; - - def backendId_to_backendIP = [:] - def backendId_to_backendHttpPort = [:] - getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - - def tablets = sql_return_maparray """ show tablets from ${compactionTableName}; """ - - // run - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - times = 1 - - do{ - (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - ++times - sleep(2000) - } while (parseJson(out.trim()).status.toLowerCase()!="success" && times<=10) - - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - logger.info("Compaction was done automatically!") - } - } - - // wait - for (def tablet : tablets) { - boolean running = true - do { - Thread.sleep(1000) - def tablet_id = tablet.TabletId - backend_id = tablet.BackendId - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - } - // def generate_dup_mow_sql = { tableName -> // List list = new ArrayList<>() // // FULLTEXT @@ -174,7 +129,7 @@ suite("test_dup_table_inverted_index", "p1") { // <= list.add("SELECT COUNT(*) FROM ${tableName} WHERE request <= 'POST'"); list.add("SELECT id FROM ${tableName} WHERE 
request <= 'POST' ORDER BY id LIMIT 2"); - + // > list.add("SELECT COUNT(*) FROM ${tableName} WHERE request > 'POST'"); list.add("SELECT id FROM ${tableName} WHERE request > 'POST' ORDER BY id LIMIT 2"); @@ -418,11 +373,11 @@ suite("test_dup_table_inverted_index", "p1") { list.add("SELECT COUNT(*) FROM ${tableName} WHERE request MATCH_REGEXP 'GET' AND request <= 'POST';"); list.add("SELECT id FROM ${tableName} WHERE request MATCH_REGEXP 'GET' AND request <= 'POST' ORDER BY id LIMIT 2;"); - list.add("SELECT COUNT(*) FROM ${tableName} WHERE request MATCH_REGEXP 'GET' AND request > 'POST' ") - list.add("SELECT id FROM ${tableName} WHERE request MATCH_REGEXP 'GET' AND request > 'POST' ORDER BY id LIMIT 2 ") + list.add("SELECT COUNT(*) FROM ${tableName} WHERE request MATCH_REGEXP 'GET' AND request > 'POST' ") + list.add("SELECT id FROM ${tableName} WHERE request MATCH_REGEXP 'GET' AND request > 'POST' ORDER BY id LIMIT 2 ") - list.add("SELECT COUNT(*) FROM ${tableName} WHERE request MATCH_REGEXP 'GET' AND request >= 'POST' ") - list.add("SELECT id FROM ${tableName} WHERE request MATCH_REGEXP 'GET' AND request >= 'POST' ORDER BY id LIMIT 2 ") + list.add("SELECT COUNT(*) FROM ${tableName} WHERE request MATCH_REGEXP 'GET' AND request >= 'POST' ") + list.add("SELECT id FROM ${tableName} WHERE request MATCH_REGEXP 'GET' AND request >= 'POST' ORDER BY id LIMIT 2 ") // FULLTEXT MATCH_PHRASE_EDGE with others list.add("SELECT COUNT(*) FROM ${tableName} WHERE request MATCH_PHRASE_EDGE 'GET' AND request LIKE '%GET%' "); @@ -713,8 +668,8 @@ suite("test_dup_table_inverted_index", "p1") { list.add("SELECT `@timestamp` FROM ${tableName} WHERE id != 200 OR request LIKE '%GET%' OR NOT (size > 100 AND size LIKE '%0%') OR clientip > '17.0.0.0' ORDER BY id LIMIT 2;"); return list } - - + + try { sql """ set enable_match_without_inverted_index = true """ sql """ set enable_common_expr_pushdown = true """ @@ -723,7 +678,7 @@ suite("test_dup_table_inverted_index", "p1") { // create table sql 
""" CREATE TABLE IF NOT EXISTS dup_httplogs - ( + ( `id` bigint NOT NULL AUTO_INCREMENT(100), `@timestamp` int(11) NULL, `clientip` varchar(20) NULL, @@ -787,7 +742,7 @@ suite("test_dup_table_inverted_index", "p1") { logger.info("dup_result4 is {}", dup_result4); compare_result(dup_result3, dup_result4, all_dup_sql) - run_compaction.call(dupTableName) + trigger_and_wait_compaction(dupTableName, "full") def dup_result5 = execute_sql.call("enable_no_need_read_data_opt", "true", all_dup_sql) logger.info("dup_result5 is {}", dup_result5); def dup_result6 = execute_sql.call("enable_no_need_read_data_opt", "false", all_dup_sql) @@ -807,7 +762,7 @@ suite("test_dup_table_inverted_index", "p1") { // create table sql """ CREATE TABLE IF NOT EXISTS mow_httplogs - ( + ( `@timestamp` int(11) NULL, `clientip` varchar(20) NULL, `request` text NULL, @@ -854,7 +809,7 @@ suite("test_dup_table_inverted_index", "p1") { sql """ INSERT INTO ${mowTable} (`@timestamp`, clientip, request, status, size) VALUES (100, '1zyvBkWVAy5H0DDDaQnrp9MmAhfo0UNB9bOGyvSEX9MW66eymeDElVzVmsvUKHORwWg2hRLN7yd253zhXGs6k7PVPHy6uqtYTaxvHWm7njZYWtlqraDGE1fvrtnyUvlrGFPJZzkuj5FQfpYl6dV2bJHV0A3gpzogKSXJSyfH02ryb2ObKaJC6dnkMic00P6R3rUCBotrU7KAaGieALbFUXBGTvjsFKUvLgJexqAEJcKwiioTp0JH9Y3NUWgi2y5kclPmUG4xVrKHWXu7bI1MYJ1DCL1eCQCuqXmUf7eFyKcR6pzTFpkurcYq5R3SjprK13EkuLmVcDJMS8DNiLVCcCIOpHQMNgVFNLI7SPCl461FPOrL1xSuULAsLNjP5xgjjpn5Bu2dAug906fSVcnJwfHuuCly0sqYfNEI0Bd1IMiQOyoqA1pwdJMYMa6hig6imR3bJcnPptA6Fo1rooqzzt6gFnloqXeo9Hd9UB1F7QhfZO21QOZho19A5d12wcnOZCb3sRzomQqcPKSyvb17SxzoP9coAEpfXZEBrds60iuPjZaez79zeGP8X4KxuK1WwVDFw661zB6nvKCtNKFQqeKVMSFWAazw735TkQRGkjlif31f3uspvmBrLagvtjlfMoT138NnNxc2FbsK5wmssNfKFRk9zNg629b46rX7qLnC3ItPYgXyPSFqSF7snjqOUHJpzvcPhyY7tuDZVW2VTd3OtRdjdlAwHbSUrI5jWI1BCeP8cObIsOjd5', 
'1zyvBkWVAy5H0DDDaQnrp9MmAhfo0UNB9bOGyvSEX9MW66eymeDElVzVmsvUKHORwWg2hRLN7yd253zhXGs6k7PVPHy6uqtYTaxvHWm7njZYWtlqraDGE1fvrtnyUvlrGFPJZzkuj5FQfpYl6dV2bJHV0A3gpzogKSXJSyfH02ryb2ObKaJC6dnkMic00P6R3rUCBotrU7KAaGieALbFUXBGTvjsFKUvLgJexqAEJcKwiioTp0JH9Y3NUWgi2y5kclPmUG4xVrKHWXu7bI1MYJ1DCL1eCQCuqXmUf7eFyKcR6pzTFpkurcYq5R3SjprK13EkuLmVcDJMS8DNiLVCcCIOpHQMNgVFNLI7SPCl461FPOrL1xSuULAsLNjP5xgjjpn5Bu2dAug906fSVcnJwfHuuCly0sqYfNEI0Bd1IMiQOyoqA1pwdJMYMa6hig6imR3bJcnPptA6Fo1rooqzzt6gFnloqXeo9Hd9UB1F7QhfZO21QOZho19A5d12wcnOZCb3sRzomQqcPKSyvb17SxzoP9coAEpfXZEBrds60iuPjZaez79zeGP8X4KxuK1WwVDFw661zB6nvKCtNKFQqeKVMSFWAazw735TkQRGkjlif31f3uspvmBrLagvtjlfMoT138NnNxc2FbsK5wmssNfKFRk9zNg629b46rX7qLnC3ItPYgXyPSFqSF7snjqOUHJpzvcPhyY7tuDZVW2VTd3OtRdjdlAwHbSUrI5jWI1BCeP8cObIsOjd5', -2, -3) """ sql """ INSERT INTO ${mowTable} (`@timestamp`, clientip, request, status, size) VALUES (100, '1zyvBkWVAy5H0DDDaQnrp9MmAhfo0UNB9bOGyvSEX9MW66eymeDElVzVmsvUKHORwWg2hRLN7yd253zhXGs6k7PVPHy6uqtYTaxvHWm7njZYWtlqraDGE1fvrtnyUvlrGFPJZzkuj5FQfpYl6dV2bJHV0A3gpzogKSXJSyfH02ryb2ObKaJC6dnkMic00P6R3rUCBotrU7KAaGieALbFUXBGTvjsFKUvLgJexqAEJcKwiioTp0JH9Y3NUWgi2y5kclPmUG4xVrKHWXu7bI1MYJ1DCL1eCQCuqXmUf7eFyKcR6pzTFpkurcYq5R3SjprK13EkuLmVcDJMS8DNiLVCcCIOpHQMNgVFNLI7SPCl461FPOrL1xSuULAsLNjP5xgjjpn5Bu2dAug906fSVcnJwfHuuCly0sqYfNEI0Bd1IMiQOyoqA1pwdJMYMa6hig6imR3bJcnPptA6Fo1rooqzzt6gFnloqXeo9Hd9UB1F7QhfZO21QOZho19A5d12wcnOZCb3sRzomQqcPKSyvb17SxzoP9coAEpfXZEBrds60iuPjZaez79zeGP8X4KxuK1WwVDFw661zB6nvKCtNKFQqeKVMSFWAazw735TkQRGkjlif31f3uspvmBrLagvtjlfMoT138NnNxc2FbsK5wmssNfKFRk9zNg629b46rX7qLnC3ItPYgXyPSFqSF7snjqOUHJpzvcPhyY7tuDZVW2VTd3OtRdjdlAwHbSUrI5jWI1BCeP8cObIsOjd5', 
'1zyvBkWVAy5H0DDDaQnrp9MmAhfo0UNB9bOGyvSEX9MW66eymeDElVzVmsvUKHORwWg2hRLN7yd253zhXGs6k7PVPHy6uqtYTaxvHWm7njZYWtlqraDGE1fvrtnyUvlrGFPJZzkuj5FQfpYl6dV2bJHV0A3gpzogKSXJSyfH02ryb2ObKaJC6dnkMic00P6R3rUCBotrU7KAaGieALbFUXBGTvjsFKUvLgJexqAEJcKwiioTp0JH9Y3NUWgi2y5kclPmUG4xVrKHWXu7bI1MYJ1DCL1eCQCuqXmUf7eFyKcR6pzTFpkurcYq5R3SjprK13EkuLmVcDJMS8DNiLVCcCIOpHQMNgVFNLI7SPCl461FPOrL1xSuULAsLNjP5xgjjpn5Bu2dAug906fSVcnJwfHuuCly0sqYfNEI0Bd1IMiQOyoqA1pwdJMYMa6hig6imR3bJcnPptA6Fo1rooqzzt6gFnloqXeo9Hd9UB1F7QhfZO21QOZho19A5d12wcnOZCb3sRzomQqcPKSyvb17SxzoP9coAEpfXZEBrds60iuPjZaez79zeGP8X4KxuK1WwVDFw661zB6nvKCtNKFQqeKVMSFWAazw735TkQRGkjlif31f3uspvmBrLagvtjlfMoT138NnNxc2FbsK5wmssNfKFRk9zNg629b46rX7qLnC3ItPYgXyPSFqSF7snjqOUHJpzvcPhyY7tuDZVW2VTd3OtRdjdlAwHbSUrI5jWI1BCeP8cObIsOjd5', -2, -3) """ sql """ sync """ - + def all_mow_sql = generate_dup_mow_sql.call(mowTable) def mow_result1 = execute_sql.call("enable_no_need_read_data_opt", "true", all_mow_sql) logger.info("mow_result1 is {}", mow_result1); @@ -875,7 +830,7 @@ suite("test_dup_table_inverted_index", "p1") { logger.info("mow_result4 is {}", mow_result4); compare_result(mow_result3, mow_result4, all_mow_sql) - run_compaction.call(mowTable) + trigger_and_wait_compaction(mowTable, "full") def mow_result5 = execute_sql.call("enable_no_need_read_data_opt", "true", all_mow_sql) logger.info("mow_result5 is {}", mow_result5); def mow_result6 = execute_sql.call("enable_no_need_read_data_opt", "false", all_mow_sql) @@ -901,4 +856,4 @@ suite("test_dup_table_inverted_index", "p1") { } finally { sql """ set enable_match_without_inverted_index = true """ } -} \ No newline at end of file +} diff --git a/regression-test/suites/inverted_index_p2/test_show_data.groovy b/regression-test/suites/inverted_index_p2/test_show_data.groovy index fe9aa45b9b4f2d..91939d5507d8dc 100644 --- a/regression-test/suites/inverted_index_p2/test_show_data.groovy +++ b/regression-test/suites/inverted_index_p2/test_show_data.groovy @@ -629,7 +629,7 @@ 
suite("test_show_data_with_compaction", "p2") { backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) @@ -742,42 +742,6 @@ suite("test_show_data_with_compaction", "p2") { return "wait_timeout" } - def run_compaction_and_wait = { tableName -> - //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus - def tablets = sql_return_maparray """ show tablets from ${tableName}; """ - - // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - logger.info("Compaction was done automatically!") - } else { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", 
compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - } - def create_table_run_compaction_and_wait = { test_name -> sql """ DROP TABLE IF EXISTS ${test_name}; """ sql """ @@ -804,14 +768,14 @@ suite("test_show_data_with_compaction", "p2") { sql """ INSERT INTO ${test_name} VALUES (3, "bason", "bason hate pear", 99); """ def data_size = wait_for_show_data_finish(test_name, 60000, 0) assertTrue(data_size != "wait_timeout") - run_compaction_and_wait(test_name) + trigger_and_wait_compaction(test_name, "full") data_size = wait_for_show_data_finish(test_name, 60000, data_size) assertTrue(data_size != "wait_timeout") return data_size } try { - + set_be_config.call("inverted_index_compaction_enable", "true") sql "DROP TABLE IF EXISTS ${tableWithIndexCompaction}" create_table_with_index.call(tableWithIndexCompaction) @@ -826,7 +790,7 @@ suite("test_show_data_with_compaction", "p2") { def with_index_size = wait_for_show_data_finish(tableWithIndexCompaction, 60000, 0) assertTrue(with_index_size != "wait_timeout") - run_compaction_and_wait(tableWithIndexCompaction) + trigger_and_wait_compaction(tableWithIndexCompaction, "full") with_index_size = wait_for_show_data_finish(tableWithIndexCompaction, 60000, with_index_size) assertTrue(with_index_size != "wait_timeout") @@ -842,7 +806,7 @@ suite("test_show_data_with_compaction", "p2") { def another_with_index_size = wait_for_show_data_finish(tableWithOutIndexCompaction, 60000, 0) assertTrue(another_with_index_size != "wait_timeout") - run_compaction_and_wait(tableWithOutIndexCompaction) + trigger_and_wait_compaction(tableWithOutIndexCompaction, "full") another_with_index_size = wait_for_show_data_finish(tableWithOutIndexCompaction, 60000, another_with_index_size) assertTrue(another_with_index_size != "wait_timeout") diff --git a/regression-test/suites/load_p0/stream_load/test_map_load_and_compaction.groovy 
b/regression-test/suites/load_p0/stream_load/test_map_load_and_compaction.groovy index fd22c12afaf545..eaf3694f41ce64 100644 --- a/regression-test/suites/load_p0/stream_load/test_map_load_and_compaction.groovy +++ b/regression-test/suites/load_p0/stream_load/test_map_load_and_compaction.groovy @@ -89,7 +89,7 @@ suite("test_map_load_and_compaction", "p0") { for (int i = 0; i < 5; ++i) { streamLoadJson.call(4063, dataFile1) } - + sql """sync""" // check result @@ -105,31 +105,7 @@ suite("test_map_load_and_compaction", "p0") { checkCompactionStatus.call(compactionStatus, 6) // trigger compaction - String backend_id; - def backendId_to_backendIP = [:] - def backendId_to_backendHttpPort = [:] - getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - def (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - assertEquals("success", compactJson.status.toLowerCase()) - - def running = false - - // wait compactions done - do { - Thread.sleep(1000) - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def cs = parseJson(out.trim()) - assertEquals("success", cs.status.toLowerCase()) - running = cs.run_status - } while (running) - + trigger_and_wait_compaction(testTable, "cumulative") checkCompactionStatus.call(compactionStatus, 1) // finally check backend alive diff --git a/regression-test/suites/pipeline_p0/statitics_compaction.groovy b/regression-test/suites/pipeline_p0/statitics_compaction.groovy index aebe55939d5609..c32c88e5a5c744 100644 --- 
a/regression-test/suites/pipeline_p0/statitics_compaction.groovy +++ b/regression-test/suites/pipeline_p0/statitics_compaction.groovy @@ -25,36 +25,7 @@ suite("statistic_table_compaction", "nonConcurrent,p0") { def tablets = sql_return_maparray """show tablets from ${table}""" // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - String backend_id = tablet.BackendId - def (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - assertEquals("success", compactJson.status.toLowerCase()) - } - - Integer counter = 600 - - // wait for all compactions done - for (def tablet in tablets) { - boolean running = true - do { - counter -= 1 - Thread.sleep(1000) - String tablet_id = tablet.TabletId - String backend_id = tablet.BackendId - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running && counter > 0) - } - - assertTrue(counter >= 0) + trigger_and_wait_compaction(table, "full") } catch (Exception e) { logger.info(e.getMessage()) if (e.getMessage().contains("Unknown table")) { diff --git a/regression-test/suites/schema_change_p0/datev2/test_agg_keys_schema_change_datev2.groovy b/regression-test/suites/schema_change_p0/datev2/test_agg_keys_schema_change_datev2.groovy index 8afc4fd6c110ab..14a5f046831203 100644 --- a/regression-test/suites/schema_change_p0/datev2/test_agg_keys_schema_change_datev2.groovy +++ 
b/regression-test/suites/schema_change_p0/datev2/test_agg_keys_schema_change_datev2.groovy @@ -33,37 +33,12 @@ suite("test_agg_keys_schema_change_datev2") { backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) assert configList instanceof List - def do_compact = { tableName -> - String[][] tablets = sql """ show tablets from ${tableName}; """ - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] - logger.info("run compaction:" + tablet_id) - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - } - - // wait for all compactions done - for (String[] tablet in tablets) { - Awaitility.await().atMost(20, TimeUnit.SECONDS).untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } - } - sql """ DROP TABLE IF EXISTS ${tbName} FORCE""" // Create table and disable light weight schema change sql """ @@ -104,7 +79,7 @@ suite("test_agg_keys_schema_change_datev2") { sql """sync""" qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - do_compact(tbName) + trigger_and_wait_compaction(tbName, "cumulative") sql """sync""" qt_sql """select * from ${tbName} ORDER BY `datek1`;""" sql """delete 
from ${tbName} where `datev3` = '2022-01-01';""" @@ -143,7 +118,7 @@ suite("test_agg_keys_schema_change_datev2") { }); sql """sync""" qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - do_compact(tbName) + trigger_and_wait_compaction(tbName, "cumulative") sql """sync""" qt_sql """select * from ${tbName} ORDER BY `datek1`;""" sql """delete from ${tbName} where `datev3` = '2022-01-01 11:11:11';""" @@ -173,7 +148,7 @@ suite("test_agg_keys_schema_change_datev2") { sql """sync""" qt_sql """select * from ${tbName} ORDER BY `datek1`;""" sql """ alter table ${tbName} add column `datev3` datetimev2(3) DEFAULT '2022-01-01 11:11:11.111' """ - + Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { String result = getJobState(tbName) if (result == "FINISHED") { @@ -184,7 +159,7 @@ suite("test_agg_keys_schema_change_datev2") { sql """sync""" qt_sql """select * from ${tbName} ORDER BY `datek1`;""" - do_compact(tbName) + trigger_and_wait_compaction(tbName, "cumulative") sql """sync""" qt_sql """select * from ${tbName} ORDER BY `datek1`;""" sql """delete from ${tbName} where `datev3` = '2022-01-01 11:11:11';""" diff --git a/regression-test/suites/schema_change_p0/datev2/test_schema_change_varchar_to_datev2.groovy b/regression-test/suites/schema_change_p0/datev2/test_schema_change_varchar_to_datev2.groovy index d01d8cd54f3747..56f8d288b4c512 100644 --- a/regression-test/suites/schema_change_p0/datev2/test_schema_change_varchar_to_datev2.groovy +++ b/regression-test/suites/schema_change_p0/datev2/test_schema_change_varchar_to_datev2.groovy @@ -38,31 +38,6 @@ suite("test_schema_change_varchar_to_datev2") { def configList = parseJson(out.trim()) assert configList instanceof List - def do_compact = { tableName -> - String[][] tablets = sql """ show tablets from ${tableName}; """ - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] - logger.info("run compaction:" + 
tablet_id) - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - } - - // wait for all compactions done - for (String[] tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } - } - sql """ DROP TABLE IF EXISTS ${tbName} FORCE""" // Create table and disable light weight schema change sql """ @@ -96,7 +71,7 @@ suite("test_schema_change_varchar_to_datev2") { sql """sync""" qt_sql_2 """select * from ${tbName} ORDER BY `k1`;""" - do_compact(tbName) + trigger_and_wait_compaction(tbName, "cumulative") sql """sync""" qt_sql_3 """select * from ${tbName} ORDER BY `k1`;""" sql """delete from ${tbName} where `k3` = '2020-01-02';""" diff --git a/regression-test/suites/schema_change_p0/decimalv2/test_agg_keys_schema_change_decimalv2.groovy b/regression-test/suites/schema_change_p0/decimalv2/test_agg_keys_schema_change_decimalv2.groovy index 295a034d8eee90..7d44bd1ee26bb4 100644 --- a/regression-test/suites/schema_change_p0/decimalv2/test_agg_keys_schema_change_decimalv2.groovy +++ b/regression-test/suites/schema_change_p0/decimalv2/test_agg_keys_schema_change_decimalv2.groovy @@ -52,31 +52,6 @@ suite("test_agg_keys_schema_change_decimalv2", "nonConcurrent") { def configList = parseJson(out.trim()) assert configList instanceof List - def do_compact = { tableName -> - String[][] tablets = sql """ show tablets from ${tableName}; """ - for 
(String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] - logger.info("run compaction:" + tablet_id) - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id ) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - } - - // wait for all compactions done - for (String[] tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } - } - sql """ DROP TABLE IF EXISTS ${tbName} FORCE""" // Create table and disable light weight schema change sql """ @@ -109,7 +84,7 @@ suite("test_agg_keys_schema_change_decimalv2", "nonConcurrent") { sql """sync""" qt_sql2 """select * from ${tbName} ORDER BY 1,2,3,4;""" - do_compact(tbName) + trigger_and_wait_compaction(tbName, "cumulative") sql """sync""" qt_sql3 """select * from ${tbName} ORDER BY 1,2,3,4;""" diff --git a/regression-test/suites/schema_change_p0/decimalv3/test_agg_keys_schema_change_decimalv3.groovy b/regression-test/suites/schema_change_p0/decimalv3/test_agg_keys_schema_change_decimalv3.groovy index fd28d01ed4595f..0cbd8ec0823644 100644 --- a/regression-test/suites/schema_change_p0/decimalv3/test_agg_keys_schema_change_decimalv3.groovy +++ b/regression-test/suites/schema_change_p0/decimalv3/test_agg_keys_schema_change_decimalv3.groovy @@ -39,31 +39,6 @@ suite("test_agg_keys_schema_change_decimalv3") { def configList = parseJson(out.trim()) assert configList instanceof List - def do_compact = { tableName -> - 
String[][] tablets = sql """ show tablets from ${tableName}; """ - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] - logger.info("run compaction:" + tablet_id) - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id ) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - } - - // wait for all compactions done - for (String[] tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } - } - sql """ DROP TABLE IF EXISTS ${tbName} FORCE""" // Create table and disable light weight schema change sql """ @@ -93,7 +68,7 @@ suite("test_agg_keys_schema_change_decimalv3") { sql """sync""" qt_sql """select * from ${tbName} ORDER BY `decimalv3k1`;""" - do_compact(tbName) + trigger_and_wait_compaction(tbName, "cumulative") sql """sync""" qt_sql """select * from ${tbName} ORDER BY `decimalv3k1`;""" sql """ alter table ${tbName} drop column `decimalv3v3` """ diff --git a/regression-test/suites/schema_change_p0/test_agg_keys_schema_change.groovy b/regression-test/suites/schema_change_p0/test_agg_keys_schema_change.groovy index 13063ad34dc9db..a23ee27801dee1 100644 --- a/regression-test/suites/schema_change_p0/test_agg_keys_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_agg_keys_schema_change.groovy @@ -22,7 +22,7 @@ suite ("test_agg_keys_schema_change") { def jobStateResult = sql """ SHOW ALTER TABLE COLUMN WHERE 
IndexName='${tableName}' ORDER BY createtime DESC LIMIT 1 """ return jobStateResult[0][9] } - + def tableName = "schema_change_agg_keys_regression_test" try { @@ -74,7 +74,7 @@ suite ("test_agg_keys_schema_change") { // add key column case 1, not light schema change sql """ - ALTER table ${tableName} ADD COLUMN new_key_column INT default "2" + ALTER table ${tableName} ADD COLUMN new_key_column INT default "2" """ int max_try_time = 3000 @@ -173,31 +173,8 @@ suite ("test_agg_keys_schema_change") { """ // compaction - String[][] tablets = sql """ show tablets from ${tableName}; """ - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - def backend_id = tablet[2] - logger.info("run compaction:" + tablet_id) - def (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - } + trigger_and_wait_compaction(tableName, "cumulative") - // wait for all compactions done - for (String[] tablet in tablets) { - boolean running = true - do { - Thread.sleep(100) - String tablet_id = tablet[0] - def backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - qt_sc """ select count(*) from ${tableName} """ qt_sc """ SELECT * FROM schema_change_agg_keys_regression_test WHERE user_id=2 """ diff --git a/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy b/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy index ed6e31e5f6a88f..5ba33f3ff66b78 100644 --- 
a/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy @@ -1,4 +1,4 @@ - + // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information @@ -148,32 +148,8 @@ suite ("test_agg_mv_schema_change") { """ // compaction - String[][] tablets = sql """ show tablets from ${tableName}; """ - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - def backend_id = tablet[2] - logger.info("run compaction:" + tablet_id) - def (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - //assertEquals(code, 0) - } + trigger_and_wait_compaction(tableName, "cumulative") - // wait for all compactions done - for (String[] tablet in tablets) { - boolean running = true - do { - Thread.sleep(100) - String tablet_id = tablet[0] - def backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - qt_sc """ select count(*) from ${tableName} """ qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ @@ -184,4 +160,4 @@ suite ("test_agg_mv_schema_change") { } - + diff --git a/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy b/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy index 719082e79bb054..73d9e9ad810601 100644 --- 
a/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy @@ -1,4 +1,4 @@ - + // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information @@ -156,32 +156,8 @@ suite ("test_agg_rollup_schema_change") { """ // compaction - String[][] tablets = sql """ show tablets from ${tableName}; """ - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - def backend_id = tablet[2] - logger.info("run compaction:" + tablet_id) - def (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - //assertEquals(code, 0) - } + trigger_and_wait_compaction(tableName, "cumulative") - // wait for all compactions done - for (String[] tablet in tablets) { - boolean running = true - do { - Thread.sleep(100) - String tablet_id = tablet[0] - def backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - qt_sc """ select count(*) from ${tableName} """ qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ diff --git a/regression-test/suites/schema_change_p0/test_agg_vals_schema_change.groovy b/regression-test/suites/schema_change_p0/test_agg_vals_schema_change.groovy index 211bdaefe3c766..9983142986067a 100644 --- a/regression-test/suites/schema_change_p0/test_agg_vals_schema_change.groovy +++ 
b/regression-test/suites/schema_change_p0/test_agg_vals_schema_change.groovy @@ -78,7 +78,7 @@ suite ("test_agg_vals_schema_change") { // add column sql """ - ALTER table ${tableName} ADD COLUMN new_column INT MAX default "1" + ALTER table ${tableName} ADD COLUMN new_column INT MAX default "1" """ qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ @@ -136,31 +136,7 @@ suite ("test_agg_vals_schema_change") { """ // compaction - String[][] tablets = sql """ show tablets from ${tableName}; """ - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - def backend_id = tablet[2] - logger.info("run compaction:" + tablet_id) - def (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - //assertEquals(code, 0) - } - - // wait for all compactions done - for (String[] tablet in tablets) { - boolean running = true - do { - Thread.sleep(100) - String tablet_id = tablet[0] - def backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction(tableName, "cumulative") qt_sc """ select count(*) from ${tableName} """ qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ diff --git a/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy index 6fe76138cd28ca..e5bc3f1450fd8c 100644 --- a/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy +++ 
b/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy @@ -75,7 +75,7 @@ suite ("test_dup_keys_schema_change") { // add column sql """ - ALTER table ${tableName} ADD COLUMN new_column INT default "1" + ALTER table ${tableName} ADD COLUMN new_column INT default "1" """ sql """ SELECT * FROM ${tableName} WHERE user_id=2 order by min_dwell_time """ @@ -139,29 +139,7 @@ suite ("test_dup_keys_schema_change") { """ // compaction - String[][] tablets = sql """ show tablets from ${tableName}; """ - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - def backend_id = tablet[2] - logger.info("run compaction:" + tablet_id) - def (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - //assertEquals(code, 0) - } - - // wait for all compactions done - for (String[] tablet in tablets) { - Awaitility.await().atMost(20, TimeUnit.SECONDS).untilAsserted(() -> { - String tablet_id = tablet[0] - def backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } + trigger_and_wait_compaction(tableName, "cumulative") qt_sc """ select count(*) from ${tableName} """ diff --git a/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy index 7c3ea7703272bb..713c470436e7fb 100644 --- a/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy +++ 
b/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy @@ -93,7 +93,7 @@ suite ("test_dup_mv_schema_change") { // add column sql """ - ALTER table ${tableName} ADD COLUMN new_column INT default "1" + ALTER table ${tableName} ADD COLUMN new_column INT default "1" """ waitForJob(tableName, 3000) @@ -149,29 +149,7 @@ suite ("test_dup_mv_schema_change") { """ // compaction - String[][] tablets = sql """ show tablets from ${tableName}; """ - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] - logger.info("run compaction:" + tablet_id) - def (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - //assertEquals(code, 0) - } - - // wait for all compactions done - for (String[] tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } + trigger_and_wait_compaction(tableName, "cumulative") qt_sc """ select count(*) from ${tableName} """ diff --git a/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy index 7f55b7fcd16e2c..a5e7172ba639b4 100644 --- a/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy @@ -102,7 +102,7 @@ suite ("test_dup_rollup_schema_change") { // add 
column sql """ - ALTER table ${tableName} ADD COLUMN new_column INT default "1" + ALTER table ${tableName} ADD COLUMN new_column INT default "1" """ sql """ SELECT * FROM ${tableName} WHERE user_id=2 order by min_dwell_time """ @@ -167,29 +167,7 @@ suite ("test_dup_rollup_schema_change") { """ // compaction - String[][] tablets = sql """ show tablets from ${tableName}; """ - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] - logger.info("run compaction:" + tablet_id) - def (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - //assertEquals(code, 0) - } - - // wait for all compactions done - for (String[] tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } + trigger_and_wait_compaction(tableName, "cumulative") qt_sc """ select count(*) from ${tableName} """ diff --git a/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy index e1914bb6c8f249..2348daad9fafb2 100644 --- a/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy @@ -74,7 +74,7 @@ suite ("test_dup_vals_schema_change") { // add column sql """ - ALTER table ${tableName} ADD COLUMN new_column INT default "1" + ALTER table 
${tableName} ADD COLUMN new_column INT default "1" """ sql """ SELECT * FROM ${tableName} WHERE user_id=2 order by min_dwell_time """ @@ -127,29 +127,7 @@ suite ("test_dup_vals_schema_change") { """ // compaction - String[][] tablets = sql """ show tablets from ${tableName}; """ - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] - logger.info("run compaction:" + tablet_id) - def (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - //assertEquals(code, 0) - } - - // wait for all compactions done - for (String[] tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } + trigger_and_wait_compaction(tableName, "cumulative") qt_sc """ select count(*) from ${tableName} """ qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 order by min_dwell_time """ diff --git a/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy b/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy index 45c37051b43ec5..e45d1e6ac8c585 100644 --- a/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy @@ -75,7 +75,7 @@ suite ("test_uniq_keys_schema_change") { // add column sql """ - ALTER table ${tableName} ADD COLUMN new_column INT default "1" + ALTER table 
${tableName} ADD COLUMN new_column INT default "1" """ sql """ SELECT * FROM ${tableName} WHERE user_id=2 """ @@ -122,29 +122,7 @@ suite ("test_uniq_keys_schema_change") { """ // compaction - String[][] tablets = sql """ show tablets from ${tableName}; """ - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] - logger.info("run compaction:" + tablet_id) - def (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - //assertEquals(code, 0) - } - - // wait for all compactions done - for (String[] tablet in tablets) { - Awaitility.await().atMost(20, TimeUnit.SECONDS).untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } + trigger_and_wait_compaction(tableName, "cumulative") qt_sc """ select count(*) from ${tableName} """ qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ diff --git a/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy b/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy index f2c961b5aa4c16..eba6036c30a628 100644 --- a/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy @@ -30,7 +30,7 @@ suite ("test_uniq_mv_schema_change") { String result = getMVJobState(tbName) if (result == "FINISHED") { return true; - } + } return false; }); // when timeout awaitlity will 
raise a exception. @@ -104,7 +104,7 @@ suite ("test_uniq_mv_schema_change") { // add column sql """ - ALTER table ${tableName} ADD COLUMN new_column INT default "1" + ALTER table ${tableName} ADD COLUMN new_column INT default "1" """ sql """ SELECT * FROM ${tableName} WHERE user_id=2 """ @@ -165,29 +165,8 @@ suite ("test_uniq_mv_schema_change") { """ // compaction - String[][] tablets = sql """ show tablets from ${tableName}; """ - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] - logger.info("run compaction:" + tablet_id) - def (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - //assertEquals(code, 0) - } + trigger_and_wait_compaction(tableName, "cumulative") - // wait for all compactions done - for (String[] tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } qt_sc """ select count(*) from ${tableName} """ qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ diff --git a/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy b/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy index 6fb74ceda4bb53..9929aaecf8cd67 100644 --- a/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy @@ -34,7 +34,7 @@ suite 
("test_uniq_rollup_schema_change") { String result = getMVJobState(tbName) if (result == "FINISHED") { return true; - } + } return false; }); // when timeout awaitlity will raise a exception. @@ -97,7 +97,7 @@ suite ("test_uniq_rollup_schema_change") { // add column sql """ - ALTER table ${tableName} ADD COLUMN new_column INT default "1" + ALTER table ${tableName} ADD COLUMN new_column INT default "1" """ sql """ SELECT * FROM ${tableName} WHERE user_id=2 """ @@ -168,29 +168,8 @@ suite ("test_uniq_rollup_schema_change") { """ // compaction - String[][] tablets = sql """ show tablets from ${tableName}; """ - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] - logger.info("run compaction:" + tablet_id) - def (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - //assertEquals(code, 0) - } + trigger_and_wait_compaction(tableName, "cumulative") - // wait for all compactions done - for (String[] tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } qt_sc """ select count(*) from ${tableName} """ qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ diff --git a/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy b/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy index 9ca8111d0ff737..72ff4fe564df08 100644 --- 
a/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy @@ -76,7 +76,7 @@ suite ("test_uniq_vals_schema_change") { // add column sql """ - ALTER table ${tableName} ADD COLUMN new_column INT default "1" + ALTER table ${tableName} ADD COLUMN new_column INT default "1" """ sql """ SELECT * FROM ${tableName} WHERE user_id=2 """ @@ -130,29 +130,7 @@ suite ("test_uniq_vals_schema_change") { """ // compaction - String[][] tablets = sql """ show tablets from ${tableName}; """ - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] - logger.info("run compaction:" + tablet_id) - def (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - //assertEquals(code, 0) - } - - // wait for all compactions done - for (String[] tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } + trigger_and_wait_compaction(tableName, "cumulative") qt_sc """ select count(*) from ${tableName} """ qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ diff --git a/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy b/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy index 2bb5e823e8bd17..7b5d7897114352 100644 --- 
a/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy @@ -24,7 +24,7 @@ suite ("test_varchar_schema_change") { def jobStateResult = sql """ SHOW ALTER TABLE COLUMN WHERE IndexName='${tableName}' ORDER BY createtime DESC LIMIT 1 """ return jobStateResult[0][9] } - + def tableName = "varchar_schema_change_regression_test" try { @@ -57,7 +57,7 @@ suite ("test_varchar_schema_change") { """ exception "Cannot shorten string length" } - + // test {//为什么第一次改没发生Nothing is changed错误?查看branch-1.2-lts代码 // sql """ alter table ${tableName} modify column c2 varchar(20) // """ @@ -108,36 +108,15 @@ suite ("test_varchar_schema_change") { sql """ insert into ${tableName} values(55,'2009-11-21','12d1d113','123aa') """ // compaction - String[][] tablets = sql """ show tablets from ${tableName}; """ - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] - logger.info("run compaction:" + tablet_id) - def (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - } - - // wait for all compactions done - for (String[] tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet[0] - backend_id = tablet[2] - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } + trigger_and_wait_compaction(tableName, "cumulative") qt_sc " select * from ${tableName} order by 1,2; " qt_sc " select min(c2),max(c2) from 
${tableName} order by 1,2; " qt_sc " select min(c2),max(c2) from ${tableName} group by c0 order by 1,2; " sleep(5000) - sql """ alter table ${tableName} - modify column c2 varchar(40), + sql """ alter table ${tableName} + modify column c2 varchar(40), modify column c3 varchar(6) DEFAULT '0' """ Awaitility.await().atMost(max_try_secs, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() -> { diff --git a/regression-test/suites/variant_github_events_p2/load.groovy b/regression-test/suites/variant_github_events_p2/load.groovy index 8e6c05ad3e91d1..5fd72b71ce7b29 100644 --- a/regression-test/suites/variant_github_events_p2/load.groovy +++ b/regression-test/suites/variant_github_events_p2/load.groovy @@ -23,7 +23,7 @@ suite("regression_test_variant_github_events_p2", "nonConcurrent,p2"){ def delta_time = 1000 def alter_res = "null" def useTime = 0 - + def wait_for_latest_op_on_table_finish = { table_name, OpTimeout -> for(int t = delta_time; t <= OpTimeout; t += delta_time){ alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = "${table_name}" ORDER BY CreateTime DESC LIMIT 1;""" @@ -106,7 +106,7 @@ suite("regression_test_variant_github_events_p2", "nonConcurrent,p2"){ logger.info("wait_for_last_build_index_on_table_running debug: " + alter_res) assertTrue(useTime <= OpTimeout, "wait_for_last_build_index_on_table_running timeout") return "wait_timeout" - } + } def backendId_to_backendIP = [:] @@ -117,15 +117,15 @@ suite("regression_test_variant_github_events_p2", "nonConcurrent,p2"){ def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value) logger.info("update config: code=" + code + ", out=" + out + ", err=" + err) } - } + } def load_json_data = {table_name, file_name -> // load the json data streamLoad { table "${table_name}" // set http request header params - set 'read_json_by_line', 'true' - set 'format', 'json' + set 'read_json_by_line', 'true' + set 
'format', 'json' set 'max_filter_ratio', '0.1' set 'memtable_on_sink_node', 'true' file file_name // import json file @@ -153,11 +153,11 @@ suite("regression_test_variant_github_events_p2", "nonConcurrent,p2"){ sql """ CREATE TABLE IF NOT EXISTS ${table_name} ( k bigint, - v variant + v variant -- INDEX idx_var(v) USING INVERTED PROPERTIES("parser" = "english") COMMENT '' ) DUPLICATE KEY(`k`) - DISTRIBUTED BY HASH(k) BUCKETS 4 + DISTRIBUTED BY HASH(k) BUCKETS 4 properties("replication_num" = "1", "disable_auto_compaction" = "true", "bloom_filter_columns" = "v", "variant_enable_flatten_nested" = "true"); """ set_be_config.call("variant_ratio_of_defaults_as_sparse_column", "1") @@ -179,42 +179,19 @@ suite("regression_test_variant_github_events_p2", "nonConcurrent,p2"){ load_json_data.call(table_name, """${getS3Url() + '/regression/gharchive.m/2022-11-07-23.json'}""") if (!isCloudMode()) { - // BUILD INDEX and expect state is FINISHED + // BUILD INDEX and expect state is FINISHED sql """ BUILD INDEX idx_var ON github_events""" state = wait_for_last_build_index_on_table_finish("github_events", timeout) assertEquals("FINISHED", state) } - - // // add bloom filter at the end of loading data + + // // add bloom filter at the end of loading data def tablets = sql_return_maparray """ show tablets from github_events; """ // trigger compactions for all tablets in github_events - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - } - - // wait for all compactions done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction("github_events", "cumulative") - sql """set enable_match_without_inverted_index = false""" + sql """set enable_match_without_inverted_index = false""" sql """ set enable_common_expr_pushdown = true """ // filter by bloom filter qt_sql """select cast(v["payload"]["pull_request"]["additions"] as int) from github_events where cast(v["repo"]["name"] as string) = 'xpressengine/xe-core' order by 1;""" @@ -226,7 +203,7 @@ suite("regression_test_variant_github_events_p2", "nonConcurrent,p2"){ v variant not null ) UNIQUE KEY(`k`) - DISTRIBUTED BY HASH(k) BUCKETS 4 + DISTRIBUTED BY HASH(k) BUCKETS 4 properties("replication_num" = "1", "disable_auto_compaction" = "false", "bloom_filter_columns" = "v", "variant_enable_flatten_nested" = "true"); """ sql """insert into github_events2 select * from github_events order by k""" @@ -236,4 +213,4 @@ suite("regression_test_variant_github_events_p2", "nonConcurrent,p2"){ // query with inverted index qt_sql """select cast(v["payload"]["pull_request"]["additions"] as int) from github_events where v["repo"]["name"] match 'xpressengine' order by 1;""" qt_sql """select count() from github_events where v["repo"]["name"] match 'apache' order by 1;""" -} \ No newline at end of file +} diff --git a/regression-test/suites/variant_p0/compaction/test_compaction.groovy b/regression-test/suites/variant_p0/compaction/test_compaction.groovy index 2499de5712d934..4a47655a967940 100644 --- a/regression-test/suites/variant_p0/compaction/test_compaction.groovy +++ 
b/regression-test/suites/variant_p0/compaction/test_compaction.groovy @@ -27,7 +27,7 @@ suite("test_compaction_variant") { backend_id = backendId_to_backendIP.keySet()[0] def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def configList = parseJson(out.trim()) @@ -49,7 +49,7 @@ suite("test_compaction_variant") { sql """ CREATE TABLE IF NOT EXISTS ${tableName} ( k bigint, - v ${var_def} + v ${var_def} ) ${key_type} KEY(`k`) DISTRIBUTED BY HASH(k) BUCKETS ${buckets} @@ -89,35 +89,7 @@ suite("test_compaction_variant") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (def tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", 
compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } + trigger_and_wait_compaction(tableName, "cumulative") int rowCount = 0 for (def tablet in tablets) { @@ -135,7 +107,7 @@ suite("test_compaction_variant") { qt_sql_11 "SELECT * FROM ${tableName} ORDER BY k, cast(v as string); " qt_sql_22 "select k, cast(v['a'] as array) from ${tableName} where size(cast(v['a'] as array)) > 0 order by k" qt_sql_33 "select k, v['a'], cast(v['b'] as string) from ${tableName} where length(cast(v['b'] as string)) > 4 order by k" - qt_sql_55 "select cast(v['b'] as string), cast(v['b']['c'] as string) from ${tableName} where cast(v['b'] as string) != 'null' and cast(v['b'] as string) != '{}' order by k desc limit 10;" + qt_sql_55 "select cast(v['b'] as string), cast(v['b']['c'] as string) from ${tableName} where cast(v['b'] as string) != 'null' and cast(v['b'] as string) != '{}' order by k desc limit 10;" } } finally { diff --git a/regression-test/suites/variant_p0/nested.groovy b/regression-test/suites/variant_p0/nested.groovy index 7df361c5731644..7235386960fb61 100644 --- a/regression-test/suites/variant_p0/nested.groovy +++ b/regression-test/suites/variant_p0/nested.groovy @@ -21,7 +21,7 @@ suite("regression_test_variant_nested", "p0"){ getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); try { - + def table_name = "var_nested" sql "DROP TABLE IF EXISTS ${table_name}" @@ -58,7 +58,7 @@ suite("regression_test_variant_nested", "p0"){ insert into var_nested values (21, '{"nested": [{"ax1111" : "1111"},{"axxxb": 100, "xxxy111": 111}, {"ddsss":1024, "aaa" : "11"}, {"xx" : 10}]}'); insert into var_nested values (22, '{"nested": [{"axxxb": 100, "xxxy111": 111}, {"ddsss":1024, "aaa" : "11"}, {"xx" : 10}, {"zzz11" : "123333"}]}'); insert into var_nested values (23, '{"nested": [{"yyyxxxx" : "11111"},{"ax1111" : "1111"},{"axxxb": 100, "xxxy111": 111}, {"ddsss":1024, "aaa" : "11"}, {"xx" : 10}]}'); - """ + """ sql """ insert into 
var_nested values (24, '{"xx" : 10}'); @@ -72,64 +72,19 @@ suite("regression_test_variant_nested", "p0"){ qt_sql """DESC var_nested""" qt_sql """ select * from var_nested order by k limit 101 - """ + """ for (int i = 101; i < 121; ++i) { sql """insert into var_nested values (${i}, '{"nested${i}" : {"nested": [{"yyyxxxx" : "11111"},{"ax1111" : "1111"},{"axxxb": 100, "xxxy111": 111}, {"ddsss":1024, "aaa" : "11"}, {"xx" : 10}]}, "not nested" : 1024, "not nested2" : {"llll" : 123}}');""" } - def trigger_full_compaction_on_tablets = { tablets -> - for (def tablet : tablets) { - String tablet_id = tablet.TabletId - String backend_id = tablet.BackendId - int times = 1 - - String compactionStatus; - do{ - def (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - ++times - sleep(2000) - compactionStatus = parseJson(out.trim()).status.toLowerCase(); - } while (compactionStatus!="success" && times<=10) - - - if (compactionStatus == "fail") { - logger.info("Compaction was done automatically!") - } - } - } - def wait_full_compaction_done = { tablets -> - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - String backend_id = tablet.BackendId - def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - } - def triger_compaction = { -> - // triger compaction - def tablets = sql_return_maparray """ show tablets from var_nested; """ - - - 
trigger_full_compaction_on_tablets.call(tablets) - wait_full_compaction_done.call(tablets) - } - triger_compaction.call() + trigger_and_wait_compaction("var_nested", "full") qt_sql """ select * from var_nested order by k limit 101 - """ + """ sql """INSERT INTO var_nested SELECT *, '{"k1":1, "k2": "some", "k3" : [1234], "k4" : 1.10000, "k5" : [[123]], "nested1" : {"nested2" : [{"a" : 10, "b" : 1.1, "c" : "1111"}]}}' FROM numbers("number" = "1000") where number > 200 limit 100;""" sql """INSERT INTO var_nested SELECT *, '{"k2":1, "k3": "nice", "k4" : [1234], "k5" : 1.10000, "k6" : [[123]], "nested2" : {"nested1" : [{"a" : 10, "b" : 1.1, "c" : "1111"}]}}' FROM numbers("number" = "5013") where number >= 400 limit 1024;""" - triger_compaction.call() + trigger_and_wait_compaction("var_nested", "full") qt_sql """select /*+SET_VAR(batch_size=1024,broker_load_batch_size=16352,disable_streaming_preaggregations=true,enable_distinct_streaming_aggregation=true,parallel_fragment_exec_ parallel_pipeline_task_num=7,parallel_fragment_exec_instance_num=4,profile_level=1,enable_pipeline_engine=true,enable_parallel_scan=false,parallel_scan_max_scanners_count=16 @@ -146,7 +101,7 @@ parallel_pipeline_task_num=7,parallel_fragment_exec_instance_num=4,profile_level // type change case sql """INSERT INTO var_nested SELECT *, '{"k1":"1", "k2": 1.1, "k3" : [1234.0], "k4" : 1.10000, "k5" : [["123"]], "nested1" : {"nested2" : [{"a" : "10", "b" : "1.1", "c" : 1111.111}]}}' FROM numbers("number" = "8000") where number > 7000 limit 100;""" qt_sql """select * from var_nested where v['k2'] = 'what' and array_contains(cast(v['nested1']['nested2']['a'] as array), 10) order by k limit 1;""" - triger_compaction.call() + trigger_and_wait_compaction("var_nested", "full") qt_sql """select * from var_nested where v['k2'] = 'nested' and array_contains(cast(v['nested1']['nested2']['a'] as array), 10) order by k limit 1;""" sql """select * from var_nested where v['k2'] = 'some' or v['k3'] = 'nice' limit 
100;""" @@ -205,12 +160,12 @@ where phone_numbers['type'] = 'GSM' OR phone_numbers['type'] = 'HOME' and phone_ order_qt_explode_sql """select count(),cast(vv as int) from var_nested_explode_variant_with_abnomal lateral view explode_variant_array(v['nested']['x']) tmp as vv where vv = 10 group by cast(vv as int)""" // 2. v['nested']['xx'] is normal array order_qt_explode_sql """select count(),cast(vv as int) from var_nested_explode_variant_with_abnomal lateral view explode_variant_array(v['nested']['xx']) tmp as vv where vv = 10 group by cast(vv as int)""" - // 3. v['xx'] is none array scalar type + // 3. v['xx'] is none array scalar type test { sql """select count(),cast(vv as int) from var_nested_explode_variant_with_abnomal lateral view explode_variant_array(v['xx']) tmp as vv where vv = 10 group by cast(vv as int)""" exception("explode not support none array type") } - // 4. v['k1'] is json scalar type + // 4. v['k1'] is json scalar type test { sql """select count(),cast(vv as int) from var_nested_explode_variant_with_abnomal lateral view explode_variant_array(v['k1']) tmp as vv where vv = 10 group by cast(vv as int)""" exception("explode not support none array type") @@ -226,4 +181,4 @@ where phone_numbers['type'] = 'GSM' OR phone_numbers['type'] = 'HOME' and phone_ // reset flags } -} \ No newline at end of file +} diff --git a/regression-test/suites/variant_p1/compaction/compaction_sparse_column.groovy b/regression-test/suites/variant_p1/compaction/compaction_sparse_column.groovy index 91f64c19a02d22..13ef63e9fe6c37 100644 --- a/regression-test/suites/variant_p1/compaction/compaction_sparse_column.groovy +++ b/regression-test/suites/variant_p1/compaction/compaction_sparse_column.groovy @@ -56,7 +56,7 @@ suite("test_compaction_sparse_column", "p1,nonConcurrent") { k bigint, v variant ) - DUPLICATE KEY(`k`) + DUPLICATE KEY(`k`) DISTRIBUTED BY HASH(`k`) BUCKETS 1 PROPERTIES ( "replication_num" = "1", @@ -67,26 +67,26 @@ suite("test_compaction_sparse_column", 
"p1,nonConcurrent") { sql """insert into ${tableName} select 0, '{"a": 11245, "b" : 42000}' as json_str union all select 0, '{"a": 1123}' as json_str union all select 0, '{"a" : 1234, "xxxx" : "aaaaa"}' as json_str from numbers("number" = "4096") limit 4096 ;""" - + sql """insert into ${tableName} select 1, '{"a": 11245, "b" : 42001}' as json_str union all select 1, '{"a": 1123}' as json_str union all select 1, '{"a" : 1234, "xxxx" : "bbbbb"}' as json_str from numbers("number" = "4096") limit 4096 ;""" - + sql """insert into ${tableName} select 2, '{"a": 11245, "b" : 42002}' as json_str union all select 2, '{"a": 1123}' as json_str union all select 2, '{"a" : 1234, "xxxx" : "ccccc"}' as json_str from numbers("number" = "4096") limit 4096 ;""" - + sql """insert into ${tableName} select 3, '{"a" : 1234, "point" : 1, "xxxx" : "ddddd"}' as json_str union all select 3, '{"a": 1123}' as json_str union all select 3, '{"a": 11245, "b" : 42003}' as json_str from numbers("number" = "4096") limit 4096 ;""" sql """insert into ${tableName} select 4, '{"a" : 1234, "xxxx" : "eeeee", "point" : 5}' as json_str union all select 4, '{"a": 1123}' as json_str union all select 4, '{"a": 11245, "b" : 42004}' as json_str from numbers("number" = "4096") limit 4096 ;""" - - + + sql """insert into ${tableName} select 5, '{"a" : 1234, "xxxx" : "fffff", "point" : 42000}' as json_str union all select 5, '{"a": 1123}' as json_str union all select 5, '{"a": 11245, "b" : 42005}' as json_str from numbers("number" = "4096") limit 4096 ;""" - + qt_select_b_bfcompact """ SELECT count(cast(v['b'] as int)) FROM ${tableName};""" qt_select_xxxx_bfcompact """ SELECT count(cast(v['xxxx'] as string)) FROM ${tableName};""" qt_select_point_bfcompact """ SELECT count(cast(v['point'] as bigint)) FROM ${tableName};""" @@ -108,35 +108,7 @@ suite("test_compaction_sparse_column", "p1,nonConcurrent") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in 
${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (def tablet in tablets) { - Awaitility.await().untilAsserted(() -> { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - return compactionStatus.run_status; - }); - } + trigger_and_wait_compaction(tableName, "cumulative") int rowCount = 0 for (def tablet in tablets) { diff --git a/regression-test/suites/variant_p1/compaction/test_compaction_extract_root.groovy b/regression-test/suites/variant_p1/compaction/test_compaction_extract_root.groovy index 83dc0a559e6fc5..015ab9baa1d03f 100644 --- a/regression-test/suites/variant_p1/compaction/test_compaction_extract_root.groovy +++ b/regression-test/suites/variant_p1/compaction/test_compaction_extract_root.groovy @@ -51,7 +51,7 @@ suite("test_compaction_extract_root", "p1,nonConcurrent") { k bigint, v variant ) - DUPLICATE KEY(`k`) + DUPLICATE KEY(`k`) DISTRIBUTED BY HASH(`k`) BUCKETS 1 PROPERTIES ( "replication_num" = "1", @@ -64,22 +64,22 @@ 
suite("test_compaction_extract_root", "p1,nonConcurrent") { sql """insert into ${tableName} select 0, '{"a": 11245, "b" : {"state" : "open", "code" : 2}}' as json_str union all select 8, '{"a": 1123}' as json_str union all select 0, '{"a" : 1234, "xxxx" : "aaaaa"}' as json_str from numbers("number" = "4096") limit 4096 ;""" - + sql """insert into ${tableName} select 1, '{"a": 11245, "b" : {"state" : "colse", "code" : 2}}' as json_str union all select 1, '{"a": 1123}' as json_str union all select 1, '{"a" : 1234, "xxxx" : "bbbbb"}' as json_str from numbers("number" = "4096") limit 4096 ;""" - + sql """insert into ${tableName} select 2, '{"a": 11245, "b" : {"state" : "flat", "code" : 3}}' as json_str union all select 2, '{"a": 1123}' as json_str union all select 2, '{"a" : 1234, "xxxx" : "ccccc"}' as json_str from numbers("number" = "4096") limit 4096 ;""" - + sql """insert into ${tableName} select 3, '{"a" : 1234, "xxxx" : 4, "point" : 5}' as json_str union all select 3, '{"a": 1123}' as json_str union all select 3, '{"a": 11245, "b" : 42003}' as json_str from numbers("number" = "4096") limit 4096 ;""" sql """insert into ${tableName} select 4, '{"a" : 1234, "xxxx" : "eeeee", "point" : 5}' as json_str union all select 4, '{"a": 1123}' as json_str union all select 4, '{"a": 11245, "b" : 42004}' as json_str from numbers("number" = "4096") limit 4096 ;""" - + sql """insert into ${tableName} select 5, '{"a" : 1234, "xxxx" : "fffff", "point" : 42000}' as json_str union all select 5, '{"a": 1123}' as json_str union all select 5, '{"a": 11245, "b" : 42005}' as json_str from numbers("number" = "4096") limit 4096 ;""" @@ -96,37 +96,7 @@ suite("test_compaction_extract_root", "p1,nonConcurrent") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (def tablet in tablets) { - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactJson = parseJson(out.trim()) - if (compactJson.status.toLowerCase() == "fail") { - assertEquals(disableAutoCompaction, false) - logger.info("Compaction was done automatically!") - } - if (disableAutoCompaction) { - assertEquals("success", compactJson.status.toLowerCase()) - } - } - - // wait for all compactions done - for (def tablet in tablets) { - boolean running = true - do { - Thread.sleep(1000) - String tablet_id = tablet.TabletId - backend_id = tablet.BackendId - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } + trigger_and_wait_compaction(tableName, "cumulative") int rowCount = 0 for (def tablet in tablets) {