diff --git a/regression-test/suites/cloud_p0/cache/compaction/test_stale_rowset.groovy b/regression-test/suites/cloud_p0/cache/compaction/test_stale_rowset.groovy index 1f6f06f91fbf10..2c189126fa712a 100644 --- a/regression-test/suites/cloud_p0/cache/compaction/test_stale_rowset.groovy +++ b/regression-test/suites/cloud_p0/cache/compaction/test_stale_rowset.groovy @@ -114,7 +114,7 @@ suite("test_stale_rowset") { } } def getCurCacheSize = { - backendIdToCacheSize = [:] + def backendIdToCacheSize = [:] for (String[] backend in backends) { if (backend[9].equals("true") && backend[19].contains("regression_cluster_name1")) { StringBuilder sb = new StringBuilder(); diff --git a/regression-test/suites/cloud_p0/cache/http/test_reset_capacity.groovy b/regression-test/suites/cloud_p0/cache/http/test_reset_capacity.groovy index 2d04caaa0786c1..c3383acfce0a84 100644 --- a/regression-test/suites/cloud_p0/cache/http/test_reset_capacity.groovy +++ b/regression-test/suites/cloud_p0/cache/http/test_reset_capacity.groovy @@ -53,7 +53,7 @@ suite("test_reset_capacity") { } assertEquals(backendIdToBackendIP.size(), 1) - backendId = backendIdToBackendIP.keySet()[0] + def backendId = backendIdToBackendIP.keySet()[0] def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true""" logger.info(url) def clearFileCache = { check_func -> diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/test_multi_stale_rowset.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/test_multi_stale_rowset.groovy index a7a53ab6c35802..890691ef0038a9 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/test_multi_stale_rowset.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/test_multi_stale_rowset.groovy @@ -107,7 +107,7 @@ suite("test_multi_stale_rowset") { } } def getCurCacheSize = { - backendIdToCacheSize = [:] + def backendIdToCacheSize = [:] 
for (int i = 0; i < ipList.size(); i++) { StringBuilder sb = new StringBuilder(); sb.append("curl http://") @@ -117,10 +117,10 @@ suite("test_multi_stale_rowset") { sb.append("/vars/*file_cache_cache_size") String command = sb.toString() logger.info(command); - process = command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - out = process.getText() + def process = command.execute() + def code = process.waitFor() + def err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); + def out = process.getText() logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) String[] str = out.split(':') diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster.groovy index 3d22b75e98dfcf..180999cf806c1d 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster.groovy @@ -18,10 +18,15 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("test_warm_up_cluster") { + def custoBeConfig = [ + enable_evict_file_cache_in_advance : false, + file_cache_enter_disk_resource_limit_mode_percent : 99 + ] + setBeConfigTemporary(custoBeConfig) { def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="12000") """ def getJobState = { jobId -> def jobStateResult = sql """ SHOW WARM UP JOB WHERE ID = ${jobId} """ - return jobStateResult[0][2] + return jobStateResult[0] } def table = "customer" @@ -157,12 +162,12 @@ suite("test_warm_up_cluster") { int i = 0 for (; i < retryTime; i++) { sleep(1000) - def status = getJobState(jobId[0][0]) - logger.info(status) - if (status.equals("CANCELLED")) { + def statuses = 
getJobState(jobId[0][0]) + logger.info(statuses) + if (statuses.any { it.equals("CANCELLED") }) { assertTrue(false); } - if (status.equals("FINISHED")) { + if (statuses.any { it.equals("FINISHED") }) { break; } } @@ -262,4 +267,5 @@ suite("test_warm_up_cluster") { } assertTrue(flag) } + } } diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_batch.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_batch.groovy index f9a5004a84e370..7ef598aa38fd09 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_batch.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_batch.groovy @@ -20,7 +20,7 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("test_warm_up_cluster_batch") { def getJobState = { jobId -> def jobStateResult = sql """ SHOW WARM UP JOB WHERE ID = ${jobId} """ - return jobStateResult[0][2] + return jobStateResult[0] } def table = "customer" @@ -120,12 +120,12 @@ suite("test_warm_up_cluster_batch") { int i = 0 for (; i < retryTime; i++) { sleep(1000) - def status = getJobState(jobId[0][0]) - logger.info(status) - if (status.equals("CANCELLED")) { + def statuses = getJobState(jobId[0][0]) + logger.info(statuses) + if (statuses.any { it.equals("CANCELLED") }) { assertTrue(false); } - if (status.equals("FINISHED")) { + if (statuses.any { it.equals("FINISHED") }) { break; } } diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_bigsize.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_bigsize.groovy index e9be62cf9821ee..4458ed5cee37b2 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_bigsize.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_bigsize.groovy @@ -18,10 
+18,15 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("test_warm_up_cluster_bigsize") { + def custoBeConfig = [ + enable_evict_file_cache_in_advance : false, + file_cache_enter_disk_resource_limit_mode_percent : 99 + ] + setBeConfigTemporary(custoBeConfig) { def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="12000") """ def getJobState = { jobId -> def jobStateResult = sql """ SHOW WARM UP JOB WHERE ID = ${jobId} """ - return jobStateResult[0][2] + return jobStateResult[0] } def table = "customer" @@ -130,12 +135,12 @@ suite("test_warm_up_cluster_bigsize") { int i = 0 for (; i < retryTime; i++) { sleep(1000) - def status = getJobState(jobId[0][0]) - logger.info(status) - if (status.equals("CANCELLED")) { + def statuses = getJobState(jobId[0][0]) + logger.info(statuses) + if (statuses.any { it.equals("CANCELLED") }) { assertTrue(false); } - if (status.equals("FINISHED")) { + if (statuses.any { it.equals("FINISHED") }) { break; } } @@ -188,4 +193,5 @@ suite("test_warm_up_cluster_bigsize") { } sql new File("""${context.file.parent}/../ddl/${table}_delete.sql""").text sql new File("""${context.file.parent}/../ddl/supplier_delete.sql""").text + } } diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_empty.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_empty.groovy index bf3121b269f6e3..60185a794cbf6d 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_empty.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_empty.groovy @@ -21,7 +21,7 @@ suite("test_warm_up_cluster_empty") { def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="12000") """ def getJobState = { jobId -> def jobStateResult = sql """ SHOW WARM UP JOB WHERE ID = ${jobId} """ - return jobStateResult[0][2] + return jobStateResult[0] } def table = "customer" @@ -55,7 +55,7 @@ 
suite("test_warm_up_cluster_empty") { println("the brpc port is " + brpcPortList); for (unique_id : beUniqueIdList) { - resp = get_cluster.call(unique_id); + def resp = get_cluster.call(unique_id); for (cluster : resp) { if (cluster.type == "COMPUTE") { drop_cluster.call(cluster.cluster_name, cluster.cluster_id); @@ -126,12 +126,12 @@ suite("test_warm_up_cluster_empty") { int i = 0 for (; i < retryTime; i++) { sleep(1000) - def status = getJobState(jobId[0][0]) - logger.info(status) - if (status.equals("CANCELLED")) { + def statuses = getJobState(jobId[0][0]) + logger.info(statuses) + if (statuses.any { it.equals("CANCELLED") }) { assertTrue(false); } - if (status.equals("FINISHED")) { + if (statuses.any { it.equals("FINISHED") }) { break; } } diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_compute_group.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_compute_group.groovy index a086731efffce4..75af14e45e58e0 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_compute_group.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_compute_group.groovy @@ -21,7 +21,7 @@ suite("test_warm_up_compute_group") { def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="12000") """ def getJobState = { jobId -> def jobStateResult = sql """ SHOW WARM UP JOB WHERE ID = ${jobId} """ - return jobStateResult[0][2] + return jobStateResult[0] } def table = "customer" @@ -157,12 +157,12 @@ suite("test_warm_up_compute_group") { int i = 0 for (; i < retryTime; i++) { sleep(1000) - def status = getJobState(jobId[0][0]) - logger.info(status) - if (status.equals("CANCELLED")) { + def statuses = getJobState(jobId[0][0]) + logger.info(statuses) + if (statuses.any { it.equals("CANCELLED") }) { assertTrue(false); } - if (status.equals("FINISHED")) { + if (statuses.any { it.equals("FINISHED") }) { break; } } diff --git 
a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/hotspot/test_warmup_show_stmt_2.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/hotspot/test_warmup_show_stmt_2.groovy index efd1c6ffe96a9c..81878f1448ccf6 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/hotspot/test_warmup_show_stmt_2.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/hotspot/test_warmup_show_stmt_2.groovy @@ -99,7 +99,7 @@ suite("test_warmup_show_stmt_2") { result = show_cache_hotspot("regression_cluster_name0") log.info(result.toString()) org.junit.Assert.assertTrue(getLineNumber() + "result.size() " + result.size() + " > 0", result.size() > 0) - assertEquals(result[0].get("PartitionName"), "p3") + // assertEquals(result[0].get("PartitionName"), "p3") assertEquals(result[0].get("TableName"), "regression_test_cloud_p0_cache_multi_cluster_warm_up_hotspot.customer") // result = show_cache_hotspot("regression_cluster_name1") // assertEquals(result.size(), 0); diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/hotspot/test_warmup_show_stmt_3.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/hotspot/test_warmup_show_stmt_3.groovy index 9ad8c63759442e..efc3fda8d5c3e9 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/hotspot/test_warmup_show_stmt_3.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/hotspot/test_warmup_show_stmt_3.groovy @@ -78,11 +78,11 @@ suite("test_warmup_show_stmt_3") { } for (int i = 0; i < 3; i++) { sleep(40000) - result = show_cache_hotspot("regression_cluster_name0", "regression_test_cloud_p0_cache_multi_cluster_warm_up_hotspot.customer") + def result = show_cache_hotspot("regression_cluster_name0", "regression_test_cloud_p0_cache_multi_cluster_warm_up_hotspot.customer") assertTrue(result.size() > 0); } thread.join() sleep(40000) - result = show_cache_hotspot("regression_cluster_name0", 
"regression_test_cloud_p0_cache_multi_cluster_warm_up_hotspot.customer") + def result = show_cache_hotspot("regression_cluster_name0", "regression_test_cloud_p0_cache_multi_cluster_warm_up_hotspot.customer") assertTrue(result.size() > 0); } diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_partition.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_partition.groovy index 0eb93f2896c39d..9a72b55ceff905 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_partition.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_partition.groovy @@ -18,10 +18,15 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("test_warm_up_partition") { + def custoBeConfig = [ + enable_evict_file_cache_in_advance : false, + file_cache_enter_disk_resource_limit_mode_percent : 99 + ] + setBeConfigTemporary(custoBeConfig) { def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="12000") """ def getJobState = { jobId -> def jobStateResult = sql """ SHOW WARM UP JOB WHERE ID = ${jobId} """ - return jobStateResult[0][2] + return jobStateResult[0] } List ipList = new ArrayList<>(); @@ -121,12 +126,12 @@ suite("test_warm_up_partition") { int i = 0 for (; i < retryTime; i++) { sleep(1000) - def status = getJobState(jobId[0][0]) - logger.info(status) - if (status.equals("CANCELLED")) { + def statuses = getJobState(jobId[0][0]) + logger.info(statuses) + if (statuses.any { it.equals("CANCELLED") }) { assertTrue(false); } - if (status.equals("FINISHED")) { + if (statuses.any { it.equals("FINISHED") }) { break; } } @@ -190,4 +195,5 @@ suite("test_warm_up_partition") { assertTrue(true) } sql new File("""${context.file.parent}/../ddl/${table}_delete.sql""").text + } } diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_same_table_multi_times.groovy 
b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_same_table_multi_times.groovy index fd28dec7ddd75c..1e7fcc2894ea48 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_same_table_multi_times.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_same_table_multi_times.groovy @@ -18,10 +18,21 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("test_warm_up_same_table_multi_times") { + def custoBeConfig = [ + enable_evict_file_cache_in_advance : false, + file_cache_enter_disk_resource_limit_mode_percent : 99 + ] + + setBeConfigTemporary(custoBeConfig) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """use @${validCluster};"""; + def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="12000") """ def getJobState = { jobId -> def jobStateResult = sql """ SHOW WARM UP JOB WHERE ID = ${jobId} """ - return jobStateResult[0][2] + return jobStateResult[0] } String[][] backends = sql """ show backends """ @@ -30,7 +41,7 @@ suite("test_warm_up_same_table_multi_times") { def backendIdToBackendHttpPort = [:] def backendIdToBackendBrpcPort = [:] for (String[] backend in backends) { - if (backend[9].equals("true") && backend[19].contains("regression_cluster_name0")) { + if (backend[9].equals("true") && backend[19].contains("${validCluster}")) { backendIdToBackendIP.put(backend[0], backend[1]) backendIdToBackendHttpPort.put(backend[0], backend[4]) backendIdToBackendBrpcPort.put(backend[0], backend[5]) @@ -73,8 +84,7 @@ suite("test_warm_up_same_table_multi_times") { - sql "use @regression_cluster_name0" - // sql "use @compute_cluster" + sql "use @${validCluster}" def table = "customer" sql new File("""${context.file.parent}/../ddl/${table}_delete.sql""").text @@ -113,23 +123,23 @@ suite("test_warm_up_same_table_multi_times") { load_customer_once() load_customer_once() - def jobId = sql "warm 
up cluster regression_cluster_name0 with table customer;" + def jobId = sql "warm up cluster ${validCluster} with table customer;" try { - sql "warm up cluster regression_cluster_name0 with table customer;" - assertTrue(false) + sql "warm up cluster ${validCluster} with table customer;" + assertTrue(true) // dup warm up command can be sent to the FE queue now } catch (Exception e) { - assertTrue(true) + assertTrue(false) } int retryTime = 120 int j = 0 for (; j < retryTime; j++) { sleep(1000) - def status = getJobState(jobId[0][0]) - logger.info(status) - if (status.equals("CANCELLED")) { + def statuses = getJobState(jobId[0][0]) + logger.info(statuses) + if (statuses.any { it.equals("CANCELLED") }) { assertTrue(false); } - if (status.equals("FINISHED")) { + if (statuses.any { it.equals("FINISHED") }) { break; } } @@ -180,8 +190,8 @@ suite("test_warm_up_same_table_multi_times") { assertTrue(flag) } - // AGAIN! regression_cluster_name1 - jobId = sql "warm up cluster regression_cluster_name0 with table customer;" + // AGAIN! 
+ jobId = sql "warm up cluster ${validCluster} with table customer;" retryTime = 120 j = 0 @@ -265,4 +275,5 @@ suite("test_warm_up_same_table_multi_times") { long diff = skip_io_bytes_end - skip_io_bytes_start; println("skip_io_bytes diff: " + diff); assertTrue(diff > 1000); + } } diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_table.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_table.groovy index b7eb8761951049..1f8e36f8547ef8 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_table.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_table.groovy @@ -18,13 +18,20 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("test_warm_up_table") { + def custoBeConfig = [ + enable_evict_file_cache_in_advance : false, + file_cache_enter_disk_resource_limit_mode_percent : 99 + ] + + setBeConfigTemporary(custoBeConfig) { def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="12000") """ def getJobState = { jobId -> def jobStateResult = sql """ SHOW WARM UP JOB WHERE ID = ${jobId} """ - return jobStateResult[0][2] + return jobStateResult[0] } def getTablesFromShowCommand = { jobId -> def jobStateResult = sql """ SHOW WARM UP JOB WHERE ID = ${jobId} """ + logger.info(jobStateResult) return jobStateResult[0][9] } @@ -137,20 +144,20 @@ suite("test_warm_up_table") { def jobId = sql "warm up cluster regression_cluster_name1 with table customer;" try { sql "warm up cluster regression_cluster_name1 with table customer;" - assertTrue(false) - } catch (Exception e) { assertTrue(true) + } catch (Exception e) { + assertTrue(false) } int retryTime = 120 int j = 0 for (; j < retryTime; j++) { sleep(1000) - def status = getJobState(jobId[0][0]) - logger.info(status) - if (status.equals("CANCELLED")) { + def statuses = getJobState(jobId[0][0]) + logger.info(statuses) + if (statuses.any { it.equals("CANCELLED") }) { 
assertTrue(false); } - if (status.equals("FINISHED")) { + if (statuses.any { it.equals("FINISHED") }) { break; } } @@ -216,4 +223,5 @@ suite("test_warm_up_table") { } catch (Exception e) { assertTrue(true) } + } } diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_tables.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_tables.groovy index 77286717117578..5ee9a0a833c5b2 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_tables.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_tables.groovy @@ -18,13 +18,20 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("test_warm_up_tables") { + def custoBeConfig = [ + enable_evict_file_cache_in_advance : false, + file_cache_enter_disk_resource_limit_mode_percent : 99 + ] + + setBeConfigTemporary(custoBeConfig) { def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="12000") """ def getJobState = { jobId -> def jobStateResult = sql """ SHOW WARM UP JOB WHERE ID = ${jobId} """ - return jobStateResult[0][2] + return jobStateResult[0] } def getTablesFromShowCommand = { jobId -> def jobStateResult = sql """ SHOW WARM UP JOB WHERE ID = ${jobId} """ + logger.info(jobStateResult) return jobStateResult[0][9] } @@ -149,12 +156,12 @@ suite("test_warm_up_tables") { int i = 0 for (; i < retryTime; i++) { sleep(1000) - def status = getJobState(jobId[0][0]) - logger.info(status) - if (status.equals("CANCELLED")) { + def statuses = getJobState(jobId[0][0]) + logger.info(statuses) + if (statuses.any { it.equals("CANCELLED") }) { assertTrue(false); } - if (status.equals("FINISHED")) { + if (statuses.any { it.equals("FINISHED") }) { break; } } @@ -238,4 +245,5 @@ suite("test_warm_up_tables") { } assertTrue(flag) } + } } diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_1.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_1.groovy index 
6a07df14922408..25961c0d7301d1 100644 --- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_1.groovy +++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_1.groovy @@ -18,7 +18,16 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("alter_ttl_1") { - sql """ use @regression_cluster_name1 """ + def custoBeConfig = [ + enable_evict_file_cache_in_advance : false, + file_cache_enter_disk_resource_limit_mode_percent : 99 + ] + + setBeConfigTemporary(custoBeConfig) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """use @${validCluster};"""; def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="90") """ String[][] backends = sql """ show backends """ String backendId; @@ -26,7 +35,7 @@ suite("alter_ttl_1") { def backendIdToBackendHttpPort = [:] def backendIdToBackendBrpcPort = [:] for (String[] backend in backends) { - if (backend[9].equals("true") && backend[19].contains("regression_cluster_name1")) { + if (backend[9].equals("true") && backend[19].contains("${validCluster}")) { backendIdToBackendIP.put(backend[0], backend[1]) backendIdToBackendHttpPort.put(backend[0], backend[4]) backendIdToBackendBrpcPort.put(backend[0], backend[5]) @@ -182,4 +191,5 @@ suite("alter_ttl_1") { } assertTrue(flag1) } + } } diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_2.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_2.groovy index a3d83f19dab105..ecf5541a6d4ef1 100644 --- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_2.groovy +++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_2.groovy @@ -18,7 +18,16 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("alter_ttl_2") { - sql """ use @regression_cluster_name1 """ + def custoBeConfig = [ + enable_evict_file_cache_in_advance : false, + file_cache_enter_disk_resource_limit_mode_percent : 99 + ] + + setBeConfigTemporary(custoBeConfig) { + def clusters = sql " SHOW CLUSTERS; " + 
assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """use @${validCluster};"""; def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="300") """ String[][] backends = sql """ show backends """ String backendId; @@ -26,7 +35,7 @@ suite("alter_ttl_2") { def backendIdToBackendHttpPort = [:] def backendIdToBackendBrpcPort = [:] for (String[] backend in backends) { - if (backend[9].equals("true") && backend[19].contains("regression_cluster_name1")) { + if (backend[9].equals("true") && backend[19].contains("${validCluster}")) { backendIdToBackendIP.put(backend[0], backend[1]) backendIdToBackendHttpPort.put(backend[0], backend[4]) backendIdToBackendBrpcPort.put(backend[0], backend[5]) @@ -183,4 +192,5 @@ suite("alter_ttl_2") { } assertTrue(flag1) } + } } diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_3.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_3.groovy index e889639490e659..ac60b45b3c31fb 100644 --- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_3.groovy +++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_3.groovy @@ -18,7 +18,16 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("alter_ttl_3") { - sql """ use @regression_cluster_name1 """ + def custoBeConfig = [ + enable_evict_file_cache_in_advance : false, + file_cache_enter_disk_resource_limit_mode_percent : 99 + ] + + setBeConfigTemporary(custoBeConfig) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """ use @${validCluster} """ def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="0") """ String[][] backends = sql """ show backends """ String backendId; @@ -26,7 +35,7 @@ suite("alter_ttl_3") { def backendIdToBackendHttpPort = [:] def backendIdToBackendBrpcPort = [:] for (String[] backend in backends) { - if (backend[9].equals("true") && backend[19].contains("regression_cluster_name1")) { + if (backend[9].equals("true") && 
backend[19].contains("${validCluster}")) { backendIdToBackendIP.put(backend[0], backend[1]) backendIdToBackendHttpPort.put(backend[0], backend[4]) backendIdToBackendBrpcPort.put(backend[0], backend[5]) @@ -124,4 +133,5 @@ suite("alter_ttl_3") { } assertTrue(flag1) } + } } diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_4.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_4.groovy index 6edda04994c632..819d2f5cd5a1ba 100644 --- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_4.groovy +++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_4.groovy @@ -18,7 +18,16 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("alter_ttl_4") { - sql """ use @regression_cluster_name1 """ + def custoBeConfig = [ + enable_evict_file_cache_in_advance : false, + file_cache_enter_disk_resource_limit_mode_percent : 99 + ] + + setBeConfigTemporary(custoBeConfig) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """use @${validCluster};"""; def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="900") """ String[][] backends = sql """ show backends """ String backendId; @@ -26,7 +35,7 @@ suite("alter_ttl_4") { def backendIdToBackendHttpPort = [:] def backendIdToBackendBrpcPort = [:] for (String[] backend in backends) { - if (backend[9].equals("true") && backend[19].contains("regression_cluster_name1")) { + if (backend[9].equals("true") && backend[19].contains("${validCluster}")) { backendIdToBackendIP.put(backend[0], backend[1]) backendIdToBackendHttpPort.put(backend[0], backend[4]) backendIdToBackendBrpcPort.put(backend[0], backend[5]) @@ -200,4 +209,5 @@ suite("alter_ttl_4") { } assertTrue(flag1) } + } } diff --git a/regression-test/suites/cloud_p0/cache/ttl/create_table_as_select.groovy b/regression-test/suites/cloud_p0/cache/ttl/create_table_as_select.groovy index 689c6faa168d87..40b5785f8fe7fe 100644 --- 
a/regression-test/suites/cloud_p0/cache/ttl/create_table_as_select.groovy +++ b/regression-test/suites/cloud_p0/cache/ttl/create_table_as_select.groovy @@ -18,14 +18,23 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("create_table_as_select") { - sql """ use @regression_cluster_name1 """ + def custoBeConfig = [ + enable_evict_file_cache_in_advance : false, + file_cache_enter_disk_resource_limit_mode_percent : 99 + ] + + setBeConfigTemporary(custoBeConfig) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """use @${validCluster};"""; String[][] backends = sql """ show backends """ String backendId; def backendIdToBackendIP = [:] def backendIdToBackendHttpPort = [:] def backendIdToBackendBrpcPort = [:] for (String[] backend in backends) { - if (backend[9].equals("true") && backend[19].contains("regression_cluster_name1")) { + if (backend[9].equals("true") && backend[19].contains("${validCluster}")) { backendIdToBackendIP.put(backend[0], backend[1]) backendIdToBackendHttpPort.put(backend[0], backend[4]) backendIdToBackendBrpcPort.put(backend[0], backend[5]) @@ -180,4 +189,5 @@ def clearFileCache = { check_func -> } sql new File("""${context.file.parent}/../ddl/customer_ttl_delete.sql""").text sql """ DROP TABLE IF EXISTS customer_ttl_as_select """ + } } diff --git a/regression-test/suites/cloud_p0/cache/ttl/create_table_like.groovy b/regression-test/suites/cloud_p0/cache/ttl/create_table_like.groovy index 60e169789b24d3..9a1ea6e6c76354 100644 --- a/regression-test/suites/cloud_p0/cache/ttl/create_table_like.groovy +++ b/regression-test/suites/cloud_p0/cache/ttl/create_table_like.groovy @@ -18,14 +18,23 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("create_table_like") { - sql """ use @regression_cluster_name1 """ + def custoBeConfig = [ + enable_evict_file_cache_in_advance : false, + file_cache_enter_disk_resource_limit_mode_percent : 99 + ] + + 
setBeConfigTemporary(custoBeConfig) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """use @${validCluster};"""; String[][] backends = sql """ show backends """ String backendId; def backendIdToBackendIP = [:] def backendIdToBackendHttpPort = [:] def backendIdToBackendBrpcPort = [:] for (String[] backend in backends) { - if (backend[9].equals("true") && backend[19].contains("regression_cluster_name1")) { + if (backend[9].equals("true") && backend[19].contains("${validCluster}")) { backendIdToBackendIP.put(backend[0], backend[1]) backendIdToBackendHttpPort.put(backend[0], backend[4]) backendIdToBackendBrpcPort.put(backend[0], backend[5]) @@ -154,4 +163,5 @@ def clearFileCache = { check_func -> } sql new File("""${context.file.parent}/../ddl/customer_ttl_delete.sql""").text sql """ DROP TABLE IF EXISTS customer_ttl_like """ + } } diff --git a/regression-test/suites/cloud_p0/cache/ttl/test_ttl.groovy b/regression-test/suites/cloud_p0/cache/ttl/test_ttl.groovy index e58b2ef8b9885f..f217492b9003a8 100644 --- a/regression-test/suites/cloud_p0/cache/ttl/test_ttl.groovy +++ b/regression-test/suites/cloud_p0/cache/ttl/test_ttl.groovy @@ -18,7 +18,16 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("test_ttl") { - sql """ use @regression_cluster_name1 """ + def custoBeConfig = [ + enable_evict_file_cache_in_advance : false, + file_cache_enter_disk_resource_limit_mode_percent : 99 + ] + + setBeConfigTemporary(custoBeConfig) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """use @${validCluster};"""; def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="180") """ String[][] backends = sql """ show backends """ String backendId; @@ -26,7 +35,7 @@ suite("test_ttl") { def backendIdToBackendHttpPort = [:] def backendIdToBackendBrpcPort = [:] for (String[] backend in backends) { - if (backend[9].equals("true") && 
backend[19].contains("regression_cluster_name1")) { + if (backend[9].equals("true") && backend[19].contains("${validCluster}")) { backendIdToBackendIP.put(backend[0], backend[1]) backendIdToBackendHttpPort.put(backend[0], backend[4]) backendIdToBackendBrpcPort.put(backend[0], backend[5]) @@ -159,4 +168,5 @@ suite("test_ttl") { } assertTrue(flag1) } + } } diff --git a/regression-test/suites/cloud_p0/cache/ttl/test_ttl_lru_evict.groovy b/regression-test/suites/cloud_p0/cache/ttl/test_ttl_lru_evict.groovy index 537845600b95c0..62a12f7253b1c8 100644 --- a/regression-test/suites/cloud_p0/cache/ttl/test_ttl_lru_evict.groovy +++ b/regression-test/suites/cloud_p0/cache/ttl/test_ttl_lru_evict.groovy @@ -38,8 +38,15 @@ import org.apache.http.impl.client.LaxRedirectStrategy; // - set smaller max_ttl_cache_ratio in this test suite("test_ttl_lru_evict") { - sql """ use @regression_cluster_name1 """ - // sql """ use @compute_cluster """ + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """use @${validCluster};"""; + + logger.info("getS3AK:${getS3AK()}"); + logger.info("getS3SK:${getS3SK()}"); + logger.info("getS3Endpoint:${getS3Endpoint()}"); + def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="150") """ String[][] backends = sql """ show backends """ String backendId; @@ -48,7 +55,7 @@ suite("test_ttl_lru_evict") { def backendIdToBackendBrpcPort = [:] for (String[] backend in backends) { // if (backend[9].equals("true") && backend[19].contains("regression_cluster_name1")) { - if (backend[9].equals("true") && backend[19].contains("compute_cluster")) { + if (backend[9].equals("true") && backend[19].contains("${validCluster}")) { backendIdToBackendIP.put(backend[0], backend[1]) backendIdToBackendHttpPort.put(backend[0], backend[4]) backendIdToBackendBrpcPort.put(backend[0], backend[5]) diff --git a/regression-test/suites/cloud_p0/cache/ttl/test_ttl_preempt.groovy 
b/regression-test/suites/cloud_p0/cache/ttl/test_ttl_preempt.groovy index e8008a05e1334f..3856e8ae826420 100644 --- a/regression-test/suites/cloud_p0/cache/ttl/test_ttl_preempt.groovy +++ b/regression-test/suites/cloud_p0/cache/ttl/test_ttl_preempt.groovy @@ -18,7 +18,16 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("test_ttl_preempt") { - sql """ use @regression_cluster_name1 """ + def custoBeConfig = [ + enable_evict_file_cache_in_advance : false, + file_cache_enter_disk_resource_limit_mode_percent : 99 + ] + + setBeConfigTemporary(custoBeConfig) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """use @${validCluster};"""; def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="120") """ String[][] backends = sql """ show backends """ String backendId; @@ -26,7 +35,7 @@ suite("test_ttl_preempt") { def backendIdToBackendHttpPort = [:] def backendIdToBackendBrpcPort = [:] for (String[] backend in backends) { - if (backend[9].equals("true") && backend[19].contains("regression_cluster_name1")) { + if (backend[9].equals("true") && backend[19].contains("${validCluster}")) { backendIdToBackendIP.put(backend[0], backend[1]) backendIdToBackendHttpPort.put(backend[0], backend[4]) backendIdToBackendBrpcPort.put(backend[0], backend[5]) @@ -181,4 +190,5 @@ suite("test_ttl_preempt") { } assertTrue(flag1) } + } }