diff --git a/be/src/util/telemetry/telemetry.cpp b/be/src/util/telemetry/telemetry.cpp index 1b69da8356db94..4132b242f98ad3 100644 --- a/be/src/util/telemetry/telemetry.cpp +++ b/be/src/util/telemetry/telemetry.cpp @@ -44,6 +44,7 @@ #include "opentelemetry/sdk/common/attribute_utils.h" #include "opentelemetry/sdk/common/global_log_handler.h" #include "opentelemetry/sdk/trace/batch_span_processor.h" +#include "opentelemetry/sdk/trace/batch_span_processor_options.h" #include "opentelemetry/sdk/trace/tracer_provider.h" #include "opentelemetry/trace/propagation/http_trace_context.h" #include "service/backend_options.h" diff --git a/be/src/vec/aggregate_functions/aggregate_function_uniq.cpp b/be/src/vec/aggregate_functions/aggregate_function_uniq.cpp index a528162b28740e..97b0fba623cc91 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_uniq.cpp +++ b/be/src/vec/aggregate_functions/aggregate_function_uniq.cpp @@ -48,6 +48,9 @@ AggregateFunctionPtr create_aggregate_function_uniq(const std::string& name, } else if (which.is_decimal64()) { return creator_without_type::create>>( argument_types, result_is_nullable); + } else if (which.is_decimal128i()) { + return creator_without_type::create>>( + argument_types, result_is_nullable); } else if (which.is_decimal128() || which.is_decimal128i()) { return creator_without_type::create>>( argument_types, result_is_nullable); diff --git a/docker/thirdparties/docker-compose/oracle/init/03-create-table.sql b/docker/thirdparties/docker-compose/oracle/init/03-create-table.sql index 24bf6a7a856e98..046f18c0dc65fc 100644 --- a/docker/thirdparties/docker-compose/oracle/init/03-create-table.sql +++ b/docker/thirdparties/docker-compose/oracle/init/03-create-table.sql @@ -132,13 +132,6 @@ age number(2), score number(3,1) ); -CREATE TABLE "DORIS_TEST"."student3" -( -"id" NUMBER(5,0), -"NAME" VARCHAR2(20), -"AGE" NUMBER(2,0), -"SCORE" NUMBER(3,1) -); create table doris_test.test_all_types ( id int, diff --git a/docker/thirdparties/docker-compose/oracle/init/04-insert.sql b/docker/thirdparties/docker-compose/oracle/init/04-insert.sql index 663851cfa4ec95..888cdf5bf7e4e4 100644 --- a/docker/thirdparties/docker-compose/oracle/init/04-insert.sql +++ b/docker/thirdparties/docker-compose/oracle/init/04-insert.sql @@ -86,8 +86,6 @@ insert into doris_test."student2" values (2, 'bob', 21, 90.5); insert into doris_test."student2" values (3, 'jerry', 23, 88.0); insert into doris_test."student2" values (4, 'andy', 21, 93); -insert into doris_test."student3" values(1, 'doris', 3, 1.0); - insert into doris_test.test_all_types values (1, 111, 123, 7456123.89, 573, 34, 673.43, 34.1264, 56.2, 23.231, 99, 9999, 999999999, 999999999999999999, 999, 99999, 9999999999, 9999999999999999999, diff --git a/docs/en/docs/lakehouse/multi-catalog/jdbc.md b/docs/en/docs/lakehouse/multi-catalog/jdbc.md index 5fa29b130650e7..13151bc2f8db5b 100644 --- a/docs/en/docs/lakehouse/multi-catalog/jdbc.md +++ b/docs/en/docs/lakehouse/multi-catalog/jdbc.md @@ -52,7 +52,7 @@ PROPERTIES ("key"="value", ...) | `driver_url ` | Yes | | JDBC Driver Jar | | `driver_class ` | Yes | | JDBC Driver Class | | `only_specified_database` | No | "false" | Whether only the database specified to be synchronized. | -| `lower_case_table_names` | No | "false" | Whether to synchronize the database name, table name and column name of jdbc external data source in lowercase. | +| `lower_case_table_names` | No | "false" | Whether to synchronize jdbc external data source table names in lower case. 
| | `include_database_list` | No | "" | When only_specified_database=true,only synchronize the specified databases. split with ','. db name is case sensitive. | | `exclude_database_list` | No | "" | When only_specified_database=true,do not synchronize the specified databases. split with ','. db name is case sensitive. | @@ -68,7 +68,7 @@ PROPERTIES ("key"="value", ...) ### Lowercase table name synchronization -When `lower_case_table_names` is set to `true`, Doris is able to query non-lowercase databases and tables and columns by maintaining a mapping of lowercase names to actual names on the remote system +When `lower_case_table_names` is set to `true`, Doris is able to query non-lowercase databases and tables by maintaining a mapping of lowercase names to actual names on the remote system **Notice:** @@ -78,9 +78,9 @@ When `lower_case_table_names` is set to `true`, Doris is able to query non-lower For other databases, you still need to specify the real library name and table name when querying. -2. In Doris 2.0.3 and later versions, it is valid for all databases. When querying, all database names and table names and columns will be converted into real names and then queried. If you upgrade from an old version to 2.0. 3, `Refresh ` is required to take effect. +2. In Doris 2.0.3 and later versions, it is valid for all databases. When querying, all library names and table names will be converted into real names and then queried. If you upgrade from an old version to 2.0. 3, `Refresh ` is required to take effect. - However, if the database or table or column names differ only in case, such as `Doris` and `doris`, Doris cannot query them due to ambiguity. + However, if the database or table names differ only in case, such as `Doris` and `doris`, Doris cannot query them due to ambiguity. 3. When the FE parameter's `lower_case_table_names` is set to `1` or `2`, the JDBC Catalog's `lower_case_table_names` parameter must be set to `true`. If the FE parameter's `lower_case_table_names` is set to `0`, the JDBC Catalog parameter can be `true` or `false` and defaults to `false`. This ensures consistency and predictability in how Doris handles internal and external table configurations. @@ -113,8 +113,8 @@ In some cases, the keywords in the database might be used as the field names. Fo ### Predicate Pushdown 1. When executing a query like `where dt = '2022-01-01'`, Doris can push down these filtering conditions to the external data source, thereby directly excluding data that does not meet the conditions at the data source level, reducing the number of unqualified Necessary data acquisition and transfer. This greatly improves query performance while also reducing the load on external data sources. - -2. When `enable_func_pushdown` is set to true, the function conditions after where will also be pushed down to the external data source. Currently, only MySQL and ClickHouse are supported. If you encounter a function that is not supported by MySQL or ClickHouse, you can set this parameter to false. , currently Doris will automatically identify some functions not supported by MySQL and functions supported by CLickHouse for push-down condition filtering, which can be viewed through explain sql. + +2. When `enable_func_pushdown` is set to true, the function condition after where will also be pushed down to the external data source. Currently, only MySQL is supported. 
If you encounter a function that MySQL does not support, you can set this parameter to false, at present, Doris will automatically identify some functions not supported by MySQL to filter the push-down conditions, which can be checked by explain sql. Functions that are currently not pushed down include: @@ -123,13 +123,6 @@ Functions that are currently not pushed down include: | DATE_TRUNC | | MONEY_FORMAT | -Functions that are currently pushed down include: - -| ClickHouse | -|:--------------:| -| FROM_UNIXTIME | -| UNIX_TIMESTAMP | - ### Line Limit If there is a limit keyword in the query, Doris will translate it into semantics suitable for different data sources. diff --git a/docs/zh-CN/docs/lakehouse/multi-catalog/jdbc.md b/docs/zh-CN/docs/lakehouse/multi-catalog/jdbc.md index acc057caa54208..545f5ada6e23e6 100644 --- a/docs/zh-CN/docs/lakehouse/multi-catalog/jdbc.md +++ b/docs/zh-CN/docs/lakehouse/multi-catalog/jdbc.md @@ -44,16 +44,16 @@ PROPERTIES ("key"="value", ...) ## 参数说明 -| 参数 | 必须 | 默认值 | 说明 | -|---------------------------|-----|---------|-----------------------------------------------------------------------| -| `user` | 是 | | 对应数据库的用户名 | -| `password` | 是 | | 对应数据库的密码 | -| `jdbc_url` | 是 | | JDBC 连接串 | -| `driver_url` | 是 | | JDBC Driver Jar 包名称 | -| `driver_class` | 是 | | JDBC Driver Class 名称 | -| `lower_case_table_names` | 否 | "false" | 是否以小写的形式同步jdbc外部数据源的库名和表名以及列名 | -| `only_specified_database` | 否 | "false" | 指定是否只同步指定的 database | -| `include_database_list` | 否 | "" | 当only_specified_database=true时,指定同步多个database,以','分隔。db名称是大小写敏感的。 | +| 参数 | 必须 | 默认值 | 说明 | +|---------------------------|-----|---------|---------------------------------------------------------------------------------------------| +| `user` | 是 | | 对应数据库的用户名 | +| `password` | 是 | | 对应数据库的密码 | +| `jdbc_url` | 是 | | JDBC 连接串 | +| `driver_url` | 是 | | JDBC Driver Jar 包名称 | +| `driver_class` | 是 | | JDBC Driver Class 名称 | +| `lower_case_table_names` | 否 | "false" | 是否以小写的形式同步jdbc外部数据源的库名和表名 | +| `only_specified_database` | 否 | "false" | 指定是否只同步指定的 database | +| `include_database_list` | 否 | "" | 当only_specified_database=true时,指定同步多个database,以','分隔。db名称是大小写敏感的。 | | `exclude_database_list` | 否 | "" | 当only_specified_database=true时,指定不需要同步的多个database,以','分割。db名称是大小写敏感的。 | ### 驱动包路径 @@ -68,7 +68,7 @@ PROPERTIES ("key"="value", ...) ### 小写表名同步 -当 `lower_case_table_names` 设置为 `true` 时,Doris 通过维护小写名称到远程系统中实际名称的映射,能够查询非小写的数据库和表以及列 +当 `lower_case_table_names` 设置为 `true` 时,Doris 通过维护小写名称到远程系统中实际名称的映射,能够查询非小写的数据库和表 **注意:** @@ -78,9 +78,9 @@ PROPERTIES ("key"="value", ...) 对于其他数据库,仍需要在查询时指定真实的库名和表名。 -2. 在 Doris 2.0.3 及之后的版本,对所有的数据库都有效,在查询时,会将所有的库名和表名以及列名转换为真实的名称,再去查询,如果是从老版本升级到 2.0.3 ,需要 `Refresh ` 才能生效。 +2. 在 Doris 2.0.3 及之后的版本,对所有的数据库都有效,在查询时,会将所有的库名和表名转换为真实的名称,再去查询,如果是从老版本升级到 2.0.3 ,需要 `Refresh ` 才能生效。 - 但是,如果库名、表名或列名只有大小写不同,例如 `Doris` 和 `doris`,则 Doris 由于歧义而无法查询它们。 + 但是,如果数据库或者表名只有大小写不同,例如 `Doris` 和 `doris`,则 Doris 由于歧义而无法查询它们。 3. 当 FE 参数的 `lower_case_table_names` 设置为 `1` 或 `2` 时,JDBC Catalog 的 `lower_case_table_names` 参数必须设置为 `true`。如果 FE 参数的 `lower_case_table_names` 设置为 `0`,则 JDBC Catalog 的参数可以为 `true` 或 `false`,默认为 `false`。这确保了 Doris 在处理内部和外部表配置时的一致性和可预测性。 @@ -114,7 +114,7 @@ select * from mysql_catalog.mysql_database.mysql_table where k1 > 1000 and k3 =' 1. 当执行类似于 `where dt = '2022-01-01'` 这样的查询时,Doris 能够将这些过滤条件下推到外部数据源,从而直接在数据源层面排除不符合条件的数据,减少了不必要的数据获取和传输。这大大提高了查询性能,同时也降低了对外部数据源的负载。 -2. 
当 `enable_func_pushdown` 设置为true,会将 where 之后的函数条件也下推到外部数据源,目前仅支持 MySQL 以及 ClickHouse,如遇到 MySQL 或 ClickHouse 不支持的函数,可以将此参数设置为 false,目前 Doris 会自动识别部分 MySQL 不支持的函数以及 CLickHouse 支持的函数进行下推条件过滤,可通过 explain sql 查看。 +2. 当 `enable_func_pushdown` 设置为true,会将 where 之后的函数条件也下推到外部数据源,目前仅支持 MySQL,如遇到 MySQL 不支持的函数,可以将此参数设置为 false,目前 Doris 会自动识别部分 MySQL 不支持的函数进行下推条件过滤,可通过 explain sql 查看。 目前不会下推的函数有: @@ -123,13 +123,6 @@ select * from mysql_catalog.mysql_database.mysql_table where k1 > 1000 and k3 =' | DATE_TRUNC | | MONEY_FORMAT | -目前会下推的函数有: - -| ClickHouse | -|:--------------:| -| FROM_UNIXTIME | -| UNIX_TIMESTAMP | - ### 行数限制 如果在查询中带有 limit 关键字,Doris 会将其转译成适合不同数据源的语义。 diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcTable.java index fe397929acc4cb..795f00e7bfed9b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcTable.java @@ -27,8 +27,6 @@ import org.apache.doris.thrift.TTableDescriptor; import org.apache.doris.thrift.TTableType; -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Strings; import com.google.common.collect.Maps; import lombok.Setter; @@ -49,12 +47,9 @@ public class JdbcTable extends Table { private static final Logger LOG = LogManager.getLogger(JdbcTable.class); - private static final ObjectMapper objectMapper = new ObjectMapper(); - private static final String TABLE = "table"; private static final String REAL_DATABASE = "real_database"; private static final String REAL_TABLE = "real_table"; - private static final String REAL_COLUMNS = "real_columns"; private static final String RESOURCE = "resource"; private static final String TABLE_TYPE = "table_type"; private static final String URL = "jdbc_url"; @@ -70,7 +65,6 @@ public class JdbcTable extends Table { // real name only for jdbc catalog private String realDatabaseName; private String realTableName; - private Map realColumnNames; private String jdbcTypeName; @@ -116,7 +110,7 @@ public String getInsertSql(List insertCols) { sb.append(getProperRealFullTableName(TABLE_TYPE_MAP.get(getTableTypeName()))); sb.append("("); List transformedInsertCols = insertCols.stream() - .map(col -> getProperRealColumnName(TABLE_TYPE_MAP.get(getTableTypeName()), col)) + .map(col -> databaseProperName(TABLE_TYPE_MAP.get(getTableTypeName()), col)) .collect(Collectors.toList()); sb.append(String.join(",", transformedInsertCols)); sb.append(")"); @@ -206,7 +200,6 @@ public void write(DataOutput out) throws IOException { serializeMap.put(CHECK_SUM, checkSum); serializeMap.put(REAL_DATABASE, realDatabaseName); serializeMap.put(REAL_TABLE, realTableName); - serializeMap.put(REAL_COLUMNS, objectMapper.writeValueAsString(realColumnNames)); int size = (int) serializeMap.values().stream().filter(v -> { return v != null; @@ -243,9 +236,6 @@ public void readFields(DataInput in) throws IOException { checkSum = serializeMap.get(CHECK_SUM); realDatabaseName = serializeMap.get(REAL_DATABASE); realTableName = serializeMap.get(REAL_TABLE); - String realColumnNamesJson = serializeMap.get(REAL_COLUMNS); - realColumnNames = objectMapper.readValue(realColumnNamesJson, new TypeReference>() { - }); } public String getResourceName() { @@ -273,14 +263,6 @@ public String getProperRealFullTableName(TOdbcTableType tableType) { } } - public String getProperRealColumnName(TOdbcTableType tableType, String columnName) { - if 
(realColumnNames == null || realColumnNames.isEmpty() || !realColumnNames.containsKey(columnName)) { - return databaseProperName(tableType, columnName); - } else { - return properNameWithRealName(tableType, realColumnNames.get(columnName)); - } - } - public String getTableTypeName() { return jdbcTypeName; } @@ -376,13 +358,14 @@ private void validate(Map properties) throws DdlException { * @param wrapEnd The character(s) to be added at the end of each name component. * @param toUpperCase If true, convert the name to upper case. * @param toLowerCase If true, convert the name to lower case. - *
- * Note: If both toUpperCase and toLowerCase are true, the name will ultimately be converted to lower case.
- *
- * The name is expected to be in the format of 'schemaName.tableName'. If there is no '.',
- * the function will treat the entire string as one name component.
- * If there is a '.', the function will treat the string before the first '.' as the schema name
- * and the string after the '.' as the table name.
+ *
+ * Note: If both toUpperCase and toLowerCase are true, the name will ultimately be converted to lower case.
+ *
+ * The name is expected to be in the format of 'schemaName.tableName'. If there is no '.', + * the function will treat the entire string as one name component. + * If there is a '.', the function will treat the string before the first '.' as the schema name + * and the string after the '.' as the table name. + * * @return The formatted name. */ public static String formatName(String name, String wrapStart, String wrapEnd, boolean toUpperCase, @@ -403,18 +386,18 @@ public static String formatName(String name, String wrapStart, String wrapEnd, b /** * Formats a database name according to the database type. - *
+ * * Rules: * - MYSQL, OCEANBASE: Wrap with backticks (`), case unchanged. Example: mySchema.myTable -> `mySchema.myTable` * - SQLSERVER: Wrap with square brackets ([]), case unchanged. Example: mySchema.myTable -> [mySchema].[myTable] * - POSTGRESQL, CLICKHOUSE, TRINO, OCEANBASE_ORACLE, SAP_HANA: Wrap with double quotes ("), case unchanged. - * Example: mySchema.myTable -> "mySchema"."myTable" + * Example: mySchema.myTable -> "mySchema"."myTable" * - ORACLE: Wrap with double quotes ("), convert to upper case. Example: mySchema.myTable -> "MYSCHEMA"."MYTABLE" * For other types, the name is returned as is. * * @param tableType The database type. * @param name The name to be formatted, expected in 'schemaName.tableName' format. If no '.', treats entire string - * as one name component. If '.', treats string before first '.' as schema name and after as table name. + * as one name component. If '.', treats string before first '.' as schema name and after as table name. * @return The formatted name. */ public static String databaseProperName(TOdbcTableType tableType, String name) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/external/JdbcExternalTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/external/JdbcExternalTable.java index 9c5159a2bd8083..a02c59080fc4eb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/external/JdbcExternalTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/external/JdbcExternalTable.java @@ -91,8 +91,6 @@ private JdbcTable toJdbcTable() { jdbcTable.setRealDatabaseName(((JdbcExternalCatalog) catalog).getJdbcClient().getRealDatabaseName(this.dbName)); jdbcTable.setRealTableName( ((JdbcExternalCatalog) catalog).getJdbcClient().getRealTableName(this.dbName, this.name)); - jdbcTable.setRealColumnNames(((JdbcExternalCatalog) catalog).getJdbcClient().getRealColumnNames(this.dbName, - this.name)); jdbcTable.setJdbcTypeName(jdbcCatalog.getDatabaseTypeName()); jdbcTable.setJdbcUrl(jdbcCatalog.getJdbcUrl()); jdbcTable.setJdbcUser(jdbcCatalog.getJdbcUser()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java index a286e8bb5f7c30..80aa8da0f71d7f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java @@ -1077,7 +1077,7 @@ private void deleteReplicaInternal(TabletSchedCtx tabletCtx, List replicas = tabletCtx.getTablet().getReplicas(); boolean otherCatchup = replicas.stream().anyMatch( - r -> r.getId() != replica.getId() + r -> r != replica && (r.getVersion() > replica.getVersion() || (r.getVersion() == replica.getVersion() && r.getLastFailedVersion() < 0))); if (!otherCatchup) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalCatalog.java index 27bd524c1eb082..1ed6bd027aaa64 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalCatalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalCatalog.java @@ -592,6 +592,14 @@ public boolean useSelfSplitter() { return ret; } + public String bindBrokerName() { + Map properties = catalogProperty.getProperties(); + if (properties.containsKey(HMSExternalCatalog.BIND_BROKER_NAME)) { + return properties.get(HMSExternalCatalog.BIND_BROKER_NAME); + } + return null; + } + @Override public Collection getAllDbs() { makeSureInitialized(); diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/HMSExternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/HMSExternalCatalog.java index 47562ebb1f1a5f..6e3543dfccef94 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/HMSExternalCatalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/HMSExternalCatalog.java @@ -61,6 +61,8 @@ public class HMSExternalCatalog extends ExternalCatalog { private long lastSyncedEventId = -1L; public static final String ENABLE_SELF_SPLITTER = "enable.self.splitter"; public static final String FILE_META_CACHE_TTL_SECOND = "file.meta.cache.ttl-second"; + // broker name for file split and query scan. + public static final String BIND_BROKER_NAME = "broker.name"; private static final String PROP_ALLOW_FALLBACK_TO_SIMPLE_AUTH = "ipc.client.fallback-to-simple-auth-allowed"; // -1 means file cache no ttl set diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java index 850619cb246430..bac891eb920225 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java @@ -353,11 +353,13 @@ private Map loadPartitions(Iterable inputFormat, JobConf jobConf, - List partitionValues) throws UserException { + List partitionValues, + String bindBrokerName) throws UserException { FileCacheValue result = new FileCacheValue(); - result.setSplittable(HiveUtil.isSplittable(inputFormat, new Path(location), jobConf)); RemoteFileSystem fs = Env.getCurrentEnv().getExtMetaCacheMgr().getFsCache().getRemoteFileSystem( - new FileSystemCache.FileSystemCacheKey(FileSystemFactory.getFSIdentity(location), jobConf)); + new FileSystemCache.FileSystemCacheKey(FileSystemFactory.getFSIdentity( + location, bindBrokerName), jobConf, bindBrokerName)); + result.setSplittable(HiveUtil.isSplittable(fs, inputFormat, location, jobConf)); try { // For Tez engine, it may generate subdirectoies for "union" query. // So there may be files and directories in the table directory at the same time. eg: @@ -419,7 +421,8 @@ private FileCacheValue loadFiles(FileCacheKey key) { InputFormat inputFormat = HiveUtil.getInputFormat(jobConf, key.inputFormat, false); // TODO: This is a temp config, will remove it after the HiveSplitter is stable. 
if (key.useSelfSplitter) { - result = getFileCache(finalLocation, inputFormat, jobConf, key.getPartitionValues()); + result = getFileCache(finalLocation, inputFormat, jobConf, + key.getPartitionValues(), key.bindBrokerName); } else { InputSplit[] splits; String remoteUser = jobConf.get(HdfsResource.HADOOP_USER_NAME); @@ -498,23 +501,23 @@ public HivePartitionValues getPartitionValues(PartitionValueCacheKey key) { } public List getFilesByPartitionsWithCache(List partitions, - boolean useSelfSplitter) { - return getFilesByPartitions(partitions, useSelfSplitter, true); + boolean useSelfSplitter, String bindBrokerName) { + return getFilesByPartitions(partitions, useSelfSplitter, true, bindBrokerName); } public List getFilesByPartitionsWithoutCache(List partitions, - boolean useSelfSplitter) { - return getFilesByPartitions(partitions, useSelfSplitter, false); + boolean useSelfSplitter, String bindBrokerName) { + return getFilesByPartitions(partitions, useSelfSplitter, false, bindBrokerName); } private List getFilesByPartitions(List partitions, - boolean useSelfSplitter, boolean withCache) { + boolean useSelfSplitter, boolean withCache, String bindBrokerName) { long start = System.currentTimeMillis(); List keys = partitions.stream().map(p -> { FileCacheKey fileCacheKey = p.isDummyPartition() ? FileCacheKey.createDummyCacheKey(p.getDbName(), p.getTblName(), p.getPath(), - p.getInputFormat(), useSelfSplitter) - : new FileCacheKey(p.getPath(), p.getInputFormat(), p.getPartitionValues()); + p.getInputFormat(), useSelfSplitter, bindBrokerName) + : new FileCacheKey(p.getPath(), p.getInputFormat(), p.getPartitionValues(), bindBrokerName); fileCacheKey.setUseSelfSplitter(useSelfSplitter); return fileCacheKey; }).collect(Collectors.toList()); @@ -592,7 +595,7 @@ public void invalidateTableCache(String dbName, String tblName) { HivePartition partition = partitionCache.getIfPresent(partKey); if (partition != null) { fileCacheRef.get().invalidate(new FileCacheKey(partition.getPath(), - null, partition.getPartitionValues())); + null, partition.getPartitionValues(), null)); partitionCache.invalidate(partKey); } } @@ -610,7 +613,7 @@ public void invalidateTableCache(String dbName, String tblName) { * and FE will exit if some network problems occur. 
* */ FileCacheKey fileCacheKey = FileCacheKey.createDummyCacheKey( - dbName, tblName, null, null, false); + dbName, tblName, null, null, false, null); fileCacheRef.get().invalidate(fileCacheKey); } } @@ -625,7 +628,7 @@ public void invalidatePartitionCache(String dbName, String tblName, String parti HivePartition partition = partitionCache.getIfPresent(partKey); if (partition != null) { fileCacheRef.get().invalidate(new FileCacheKey(partition.getPath(), - null, partition.getPartitionValues())); + null, partition.getPartitionValues(), null)); partitionCache.invalidate(partKey); } } @@ -771,7 +774,7 @@ public AtomicReference> getFileCacheR } public List getFilesByTransaction(List partitions, ValidWriteIdList validWriteIds, - boolean isFullAcid, long tableId) { + boolean isFullAcid, long tableId, String bindBrokerName) { List fileCacheValues = Lists.newArrayList(); String remoteUser = jobConf.get(HdfsResource.HADOOP_USER_NAME); try { @@ -802,7 +805,8 @@ public List getFilesByTransaction(List partitions String acidVersionPath = new Path(baseOrDeltaPath, "_orc_acid_version").toUri().toString(); RemoteFileSystem fs = Env.getCurrentEnv().getExtMetaCacheMgr().getFsCache().getRemoteFileSystem( new FileSystemCache.FileSystemCacheKey( - FileSystemFactory.getFSIdentity(baseOrDeltaPath.toUri().toString()), jobConf)); + FileSystemFactory.getFSIdentity(baseOrDeltaPath.toUri().toString(), + bindBrokerName), jobConf, bindBrokerName)); Status status = fs.exists(acidVersionPath); if (status != Status.OK) { if (status.getErrCode() == ErrCode.NOT_FOUND) { @@ -823,7 +827,9 @@ public List getFilesByTransaction(List partitions for (AcidUtils.ParsedDelta delta : directory.getCurrentDirectories()) { String location = delta.getPath().toString(); RemoteFileSystem fs = Env.getCurrentEnv().getExtMetaCacheMgr().getFsCache().getRemoteFileSystem( - new FileSystemCache.FileSystemCacheKey(FileSystemFactory.getFSIdentity(location), jobConf)); + new FileSystemCache.FileSystemCacheKey( + FileSystemFactory.getFSIdentity(location, bindBrokerName), + jobConf, bindBrokerName)); RemoteFiles locatedFiles = fs.listLocatedFiles(location, true, false); if (delta.isDeleteDelta()) { List deleteDeltaFileNames = locatedFiles.files().stream().map(f -> f.getName()).filter( @@ -841,7 +847,9 @@ public List getFilesByTransaction(List partitions if (directory.getBaseDirectory() != null) { String location = directory.getBaseDirectory().toString(); RemoteFileSystem fs = Env.getCurrentEnv().getExtMetaCacheMgr().getFsCache().getRemoteFileSystem( - new FileSystemCache.FileSystemCacheKey(FileSystemFactory.getFSIdentity(location), jobConf)); + new FileSystemCache.FileSystemCacheKey( + FileSystemFactory.getFSIdentity(location, bindBrokerName), + jobConf, bindBrokerName)); RemoteFiles locatedFiles = fs.listLocatedFiles(location, true, false); locatedFiles.files().stream().filter( f -> f.getName().startsWith(HIVE_TRANSACTIONAL_ORC_BUCKET_PREFIX)) @@ -939,6 +947,8 @@ public static class FileCacheKey { private String location; // not in key private String inputFormat; + // Broker name for file split and file scan. + private String bindBrokerName; // Temp variable, use self file splitter or use InputFormat.getSplits. // Will remove after self splitter is stable. 
private boolean useSelfSplitter; @@ -947,16 +957,18 @@ public static class FileCacheKey { // partitionValues would be ["part1", "part2"] protected List partitionValues; - public FileCacheKey(String location, String inputFormat, List partitionValues) { + public FileCacheKey(String location, String inputFormat, List partitionValues, String bindBrokerName) { this.location = location; this.inputFormat = inputFormat; this.partitionValues = partitionValues == null ? Lists.newArrayList() : partitionValues; this.useSelfSplitter = true; + this.bindBrokerName = bindBrokerName; } public static FileCacheKey createDummyCacheKey(String dbName, String tblName, String location, - String inputFormat, boolean useSelfSplitter) { - FileCacheKey fileCacheKey = new FileCacheKey(location, inputFormat, null); + String inputFormat, boolean useSelfSplitter, + String bindBrokerName) { + FileCacheKey fileCacheKey = new FileCacheKey(location, inputFormat, null, bindBrokerName); fileCacheKey.dummyKey = dbName + "." + tblName; fileCacheKey.useSelfSplitter = useSelfSplitter; return fileCacheKey; diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClient.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClient.java index b4e2604bfc9880..97052caccd6e2f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClient.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClient.java @@ -67,15 +67,10 @@ public abstract class JdbcClient { // only used when isLowerCaseTableNames = true. protected final ConcurrentHashMap lowerDBToRealDB = new ConcurrentHashMap<>(); // only used when isLowerCaseTableNames = true. - protected final ConcurrentHashMap> lowerTableToRealTable - = new ConcurrentHashMap<>(); - // only used when isLowerCaseTableNames = true. - protected final ConcurrentHashMap>> - lowerColumnToRealColumn = new ConcurrentHashMap<>(); + protected final ConcurrentHashMap lowerTableToRealTable = new ConcurrentHashMap<>(); private final AtomicBoolean dbNamesLoaded = new AtomicBoolean(false); private final AtomicBoolean tableNamesLoaded = new AtomicBoolean(false); - private final AtomicBoolean columnNamesLoaded = new AtomicBoolean(false); public static JdbcClient createJdbcClient(JdbcClientConfig jdbcClientConfig) { String dbType = parseDbType(jdbcClientConfig.getJdbcUrl()); @@ -183,7 +178,7 @@ public void close(AutoCloseable... closeables) { if (closeable != null) { try { closeable.close(); - } catch (Exception e) { + } catch (Exception e) { throw new JdbcClientException("Can not close : ", e); } } @@ -191,10 +186,8 @@ public void close(AutoCloseable... closeables) { } // This part used to process meta-information of database, table and column. 
- /** * get all database name through JDBC - * * @return list of database names */ public List getDatabaseNameList() { @@ -215,8 +208,6 @@ public List getDatabaseNameList() { if (isLowerCaseTableNames) { lowerDBToRealDB.put(databaseName.toLowerCase(), databaseName); databaseName = databaseName.toLowerCase(); - } else { - lowerDBToRealDB.put(databaseName, databaseName); } tempDatabaseNames.add(databaseName); } @@ -246,20 +237,20 @@ public List getDatabaseNameList() { * get all tables of one database */ public List getTablesNameList(String dbName) { + String currentDbName = dbName; List tablesName = Lists.newArrayList(); String[] tableTypes = getTableTypes(); - String finalDbName = getRealDatabaseName(dbName); + if (isLowerCaseTableNames) { + currentDbName = getRealDatabaseName(dbName); + } + String finalDbName = currentDbName; processTable(finalDbName, null, tableTypes, (rs) -> { try { while (rs.next()) { String tableName = rs.getString("TABLE_NAME"); if (isLowerCaseTableNames) { - lowerTableToRealTable.putIfAbsent(finalDbName, new ConcurrentHashMap<>()); - lowerTableToRealTable.get(finalDbName).put(tableName.toLowerCase(), tableName); + lowerTableToRealTable.put(tableName.toLowerCase(), tableName); tableName = tableName.toLowerCase(); - } else { - lowerTableToRealTable.putIfAbsent(finalDbName, new ConcurrentHashMap<>()); - lowerTableToRealTable.get(finalDbName).put(tableName, tableName); } tablesName.add(tableName); } @@ -271,10 +262,16 @@ public List getTablesNameList(String dbName) { } public boolean isTableExist(String dbName, String tableName) { + String currentDbName = dbName; + String currentTableName = tableName; final boolean[] isExist = {false}; + if (isLowerCaseTableNames) { + currentDbName = getRealDatabaseName(dbName); + currentTableName = getRealTableName(dbName, tableName); + } String[] tableTypes = getTableTypes(); - String finalDbName = getRealDatabaseName(dbName); - String finalTableName = getRealTableName(dbName, tableName); + String finalTableName = currentTableName; + String finalDbName = currentDbName; processTable(finalDbName, finalTableName, tableTypes, (rs) -> { try { if (rs.next()) { @@ -295,25 +292,23 @@ public List getJdbcColumnsInfo(String dbName, String tableName) Connection conn = getConnection(); ResultSet rs = null; List tableSchema = Lists.newArrayList(); - String finalDbName = getRealDatabaseName(dbName); - String finalTableName = getRealTableName(dbName, tableName); + // if isLowerCaseTableNames == true, tableName is lower case + // but databaseMetaData.getColumns() is case sensitive + String currentDbName = dbName; + String currentTableName = tableName; + if (isLowerCaseTableNames) { + currentDbName = getRealDatabaseName(dbName); + currentTableName = getRealTableName(dbName, tableName); + } + String finalDbName = currentDbName; + String finalTableName = currentTableName; try { DatabaseMetaData databaseMetaData = conn.getMetaData(); String catalogName = getCatalogName(conn); rs = getColumns(databaseMetaData, catalogName, finalDbName, finalTableName); while (rs.next()) { - lowerColumnToRealColumn.putIfAbsent(finalDbName, new ConcurrentHashMap<>()); - lowerColumnToRealColumn.get(finalDbName).putIfAbsent(finalTableName, new ConcurrentHashMap<>()); JdbcFieldSchema field = new JdbcFieldSchema(); - String columnName = rs.getString("COLUMN_NAME"); - if (isLowerCaseTableNames) { - lowerColumnToRealColumn.get(finalDbName).get(finalTableName) - .put(columnName.toLowerCase(), columnName); - columnName = columnName.toLowerCase(); - } else { - 
lowerColumnToRealColumn.get(finalDbName).get(finalTableName).put(columnName, columnName); - } - field.setColumnName(columnName); + field.setColumnName(rs.getString("COLUMN_NAME")); field.setDataType(rs.getInt("DATA_TYPE")); field.setDataTypeName(rs.getString("TYPE_NAME")); /* @@ -357,9 +352,11 @@ public List getColumnsFromJdbc(String dbName, String tableName) { } public String getRealDatabaseName(String dbname) { - if (lowerDBToRealDB == null - || lowerDBToRealDB.isEmpty() - || !lowerDBToRealDB.containsKey(dbname)) { + if (!isLowerCaseTableNames) { + return dbname; + } + + if (lowerDBToRealDB.isEmpty() || !lowerDBToRealDB.containsKey(dbname)) { loadDatabaseNamesIfNeeded(); } @@ -367,34 +364,15 @@ public String getRealDatabaseName(String dbname) { } public String getRealTableName(String dbName, String tableName) { - String realDbName = getRealDatabaseName(dbName); - if (lowerTableToRealTable == null - || lowerTableToRealTable.isEmpty() - || !lowerTableToRealTable.containsKey(realDbName) - || lowerTableToRealTable.get(realDbName) == null - || lowerTableToRealTable.get(realDbName).isEmpty() - || !lowerTableToRealTable.get(realDbName).containsKey(tableName) - || lowerTableToRealTable.get(realDbName).get(tableName) == null) { - loadTableNamesIfNeeded(dbName); + if (!isLowerCaseTableNames) { + return tableName; } - return lowerTableToRealTable.get(realDbName).get(tableName); - } - - public Map getRealColumnNames(String dbName, String tableName) { - String realDbName = getRealDatabaseName(dbName); - String realTableName = getRealTableName(dbName, tableName); - if (lowerColumnToRealColumn == null - || lowerColumnToRealColumn.isEmpty() - || !lowerColumnToRealColumn.containsKey(realDbName) - || lowerColumnToRealColumn.get(realDbName) == null - || lowerColumnToRealColumn.get(realDbName).isEmpty() - || !lowerColumnToRealColumn.get(realDbName).containsKey(realTableName) - || lowerColumnToRealColumn.get(realDbName).get(realTableName) == null - || lowerColumnToRealColumn.get(realDbName).get(realTableName).isEmpty()) { - loadColumnNamesIfNeeded(dbName, tableName); + if (lowerTableToRealTable.isEmpty() || !lowerTableToRealTable.containsKey(tableName)) { + loadTableNamesIfNeeded(dbName); } - return lowerColumnToRealColumn.get(realDbName).get(realTableName); + + return lowerTableToRealTable.get(tableName); } private void loadDatabaseNamesIfNeeded() { @@ -409,12 +387,6 @@ private void loadTableNamesIfNeeded(String dbName) { } } - private void loadColumnNamesIfNeeded(String dbName, String tableName) { - if (columnNamesLoaded.compareAndSet(false, true)) { - getJdbcColumnsInfo(dbName, tableName); - } - } - // protected methods,for subclass to override protected String getCatalogName(Connection conn) throws SQLException { return null; @@ -439,7 +411,7 @@ protected String[] getTableTypes() { } protected void processTable(String dbName, String tableName, String[] tableTypes, - Consumer resultSetConsumer) { + Consumer resultSetConsumer) { Connection conn = getConnection(); ResultSet rs = null; try { @@ -463,7 +435,7 @@ protected boolean isTableModified(String modifiedTableName, String actualTableNa } protected ResultSet getColumns(DatabaseMetaData databaseMetaData, String catalogName, String schemaName, - String tableName) throws SQLException { + String tableName) throws SQLException { return databaseMetaData.getColumns(catalogName, schemaName, tableName, null); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcMySQLClient.java 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcMySQLClient.java index 3273f4b7591c21..61ba2a0db47d07 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcMySQLClient.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcMySQLClient.java @@ -35,7 +35,6 @@ import java.sql.Statement; import java.util.List; import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; import java.util.function.Consumer; public class JdbcMySQLClient extends JdbcClient { @@ -118,27 +117,21 @@ public List getJdbcColumnsInfo(String dbName, String tableName) Connection conn = getConnection(); ResultSet rs = null; List tableSchema = com.google.common.collect.Lists.newArrayList(); - String finalDbName = getRealDatabaseName(dbName); - String finalTableName = getRealTableName(dbName, tableName); + // if isLowerCaseTableNames == true, tableName is lower case + // but databaseMetaData.getColumns() is case sensitive + if (isLowerCaseTableNames) { + dbName = lowerDBToRealDB.get(dbName); + tableName = lowerTableToRealTable.get(tableName); + } try { DatabaseMetaData databaseMetaData = conn.getMetaData(); String catalogName = getCatalogName(conn); - rs = getColumns(databaseMetaData, catalogName, finalDbName, finalTableName); + rs = getColumns(databaseMetaData, catalogName, dbName, tableName); List primaryKeys = getPrimaryKeys(databaseMetaData, catalogName, dbName, tableName); Map mapFieldtoType = null; while (rs.next()) { - lowerColumnToRealColumn.putIfAbsent(finalDbName, new ConcurrentHashMap<>()); - lowerColumnToRealColumn.get(finalDbName).putIfAbsent(finalTableName, new ConcurrentHashMap<>()); JdbcFieldSchema field = new JdbcFieldSchema(); - String columnName = rs.getString("COLUMN_NAME"); - if (isLowerCaseTableNames) { - lowerColumnToRealColumn.get(finalDbName).get(finalTableName) - .put(columnName.toLowerCase(), columnName); - columnName = columnName.toLowerCase(); - } else { - lowerColumnToRealColumn.get(finalDbName).get(finalTableName).put(columnName, columnName); - } - field.setColumnName(columnName); + field.setColumnName(rs.getString("COLUMN_NAME")); field.setDataType(rs.getInt("DATA_TYPE")); // in mysql-jdbc-connector-8.0.*, TYPE_NAME of the HLL column in doris will be "UNKNOWN" diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcOracleClient.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcOracleClient.java index 270b5b4bdc8b2f..d0a9f2c3de7ab3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcOracleClient.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcOracleClient.java @@ -28,7 +28,6 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.util.List; -import java.util.concurrent.ConcurrentHashMap; public class JdbcOracleClient extends JdbcClient { @@ -62,8 +61,6 @@ public List getDatabaseNameList() { if (isLowerCaseTableNames) { lowerDBToRealDB.put(databaseName.toLowerCase(), databaseName); databaseName = databaseName.toLowerCase(); - } else { - lowerDBToRealDB.put(databaseName, databaseName); } tempDatabaseNames.add(databaseName); } @@ -94,8 +91,14 @@ public List getJdbcColumnsInfo(String dbName, String tableName) Connection conn = getConnection(); ResultSet rs = null; List tableSchema = Lists.newArrayList(); - String finalDbName = getRealDatabaseName(dbName); - String finalTableName = getRealTableName(dbName, tableName); + String currentDbName = dbName; + String currentTableName = tableName; 
+ if (isLowerCaseTableNames) { + currentDbName = getRealDatabaseName(dbName); + currentTableName = getRealTableName(dbName, tableName); + } + String finalDbName = currentDbName; + String finalTableName = currentTableName; try { DatabaseMetaData databaseMetaData = conn.getMetaData(); String catalogName = getCatalogName(conn); @@ -116,18 +119,8 @@ public List getJdbcColumnsInfo(String dbName, String tableName) if (isModify && isTableModified(rs.getString("TABLE_NAME"), finalTableName)) { continue; } - lowerColumnToRealColumn.putIfAbsent(finalDbName, new ConcurrentHashMap<>()); - lowerColumnToRealColumn.get(finalDbName).putIfAbsent(finalTableName, new ConcurrentHashMap<>()); JdbcFieldSchema field = new JdbcFieldSchema(); - String columnName = rs.getString("COLUMN_NAME"); - if (isLowerCaseTableNames) { - lowerColumnToRealColumn.get(finalDbName).get(finalTableName) - .put(columnName.toLowerCase(), columnName); - columnName = columnName.toLowerCase(); - } else { - lowerColumnToRealColumn.get(finalDbName).get(finalTableName).put(columnName, columnName); - } - field.setColumnName(columnName); + field.setColumnName(rs.getString("COLUMN_NAME")); field.setDataType(rs.getInt("DATA_TYPE")); field.setDataTypeName(rs.getString("TYPE_NAME")); /* diff --git a/fe/fe-core/src/main/java/org/apache/doris/external/hive/util/HiveUtil.java b/fe/fe-core/src/main/java/org/apache/doris/external/hive/util/HiveUtil.java index 4bf01910f826ff..deb048b59439ec 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/external/hive/util/HiveUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/external/hive/util/HiveUtil.java @@ -24,6 +24,8 @@ import org.apache.doris.common.AnalysisException; import org.apache.doris.common.UserException; import org.apache.doris.fs.FileSystemFactory; +import org.apache.doris.fs.remote.BrokerFileSystem; +import org.apache.doris.fs.remote.RemoteFileSystem; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; @@ -188,12 +190,17 @@ private static Type convertHiveTypeToiveDoris(TypeInfo hiveTypeInfo) { } } - public static boolean isSplittable(InputFormat inputFormat, Path path, JobConf jobConf) { + public static boolean isSplittable(RemoteFileSystem remoteFileSystem, InputFormat inputFormat, + String location, JobConf jobConf) throws UserException { + if (remoteFileSystem instanceof BrokerFileSystem) { + return ((BrokerFileSystem) remoteFileSystem) + .isSplittable(location, inputFormat.getClass().getCanonicalName()); + } + // ORC uses a custom InputFormat but is always splittable if (inputFormat.getClass().getSimpleName().equals("OrcInputFormat")) { return true; } - // use reflection to get isSplitable method on FileInputFormat // ATTN: the method name is actually "isSplitable", but the right spell is "isSplittable" Method method = null; @@ -209,6 +216,7 @@ public static boolean isSplittable(InputFormat inputFormat, Path path, Job if (method == null) { return false; } + Path path = new Path(location); try { method.setAccessible(true); return (boolean) method.invoke(inputFormat, FileSystemFactory.getNativeByPath(path, jobConf), path); diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/FileSystemCache.java b/fe/fe-core/src/main/java/org/apache/doris/fs/FileSystemCache.java index edc746ebe24e53..7946dd5e8a7cea 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/fs/FileSystemCache.java +++ b/fe/fe-core/src/main/java/org/apache/doris/fs/FileSystemCache.java @@ -53,7 +53,7 @@ public RemoteFileSystem load(FileSystemCacheKey key) { } private RemoteFileSystem 
loadFileSystem(FileSystemCacheKey key) { - return FileSystemFactory.getByType(key.type, key.conf); + return FileSystemFactory.getRemoteFileSystem(key.type, key.conf, key.bindBrokerName); } public RemoteFileSystem getRemoteFileSystem(FileSystemCacheKey key) { @@ -69,11 +69,13 @@ public static class FileSystemCacheKey { // eg: hdfs://nameservices1 private final String fsIdent; private final JobConf conf; + private final String bindBrokerName; - public FileSystemCacheKey(Pair fs, JobConf conf) { + public FileSystemCacheKey(Pair fs, JobConf conf, String bindBrokerName) { this.type = fs.first; this.fsIdent = fs.second; this.conf = conf; + this.bindBrokerName = bindBrokerName; } @Override @@ -84,14 +86,21 @@ public boolean equals(Object obj) { if (!(obj instanceof FileSystemCacheKey)) { return false; } - return type.equals(((FileSystemCacheKey) obj).type) + boolean equalsWithoutBroker = type.equals(((FileSystemCacheKey) obj).type) && fsIdent.equals(((FileSystemCacheKey) obj).fsIdent) && conf == ((FileSystemCacheKey) obj).conf; + if (bindBrokerName == null) { + return equalsWithoutBroker; + } + return equalsWithoutBroker && bindBrokerName.equals(((FileSystemCacheKey) obj).bindBrokerName); } @Override public int hashCode() { - return Objects.hash(conf, fsIdent, type); + if (bindBrokerName == null) { + return Objects.hash(conf, fsIdent, type); + } + return Objects.hash(conf, fsIdent, type, bindBrokerName); } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/FileSystemFactory.java b/fe/fe-core/src/main/java/org/apache/doris/fs/FileSystemFactory.java index 3837a7eb95be50..e54a73bbff3062 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/fs/FileSystemFactory.java +++ b/fe/fe-core/src/main/java/org/apache/doris/fs/FileSystemFactory.java @@ -56,9 +56,11 @@ public static RemoteFileSystem get(String name, StorageBackend.StorageType type, } } - public static Pair getFSIdentity(String location) { + public static Pair getFSIdentity(String location, String bindBrokerName) { FileSystemType fsType; - if (S3Util.isObjStorage(location)) { + if (bindBrokerName != null) { + fsType = FileSystemType.BROKER; + } else if (S3Util.isObjStorage(location)) { if (S3Util.isHdfsOnOssEndpoint(location)) { // if hdfs service is enabled on oss, use hdfs lib to access oss. 
fsType = FileSystemType.DFS; @@ -83,7 +85,8 @@ public static Pair getFSIdentity(String location) { return Pair.of(fsType, fsIdent); } - public static RemoteFileSystem getByType(FileSystemType type, Configuration conf) { + public static RemoteFileSystem getRemoteFileSystem(FileSystemType type, Configuration conf, + String bindBrokerName) { Map properties = new HashMap<>(); conf.iterator().forEachRemaining(e -> properties.put(e.getKey(), e.getValue())); switch (type) { @@ -95,6 +98,8 @@ public static RemoteFileSystem getByType(FileSystemType type, Configuration conf return new OFSFileSystem(properties); case JFS: return new JFSFileSystem(properties); + case BROKER: + return new BrokerFileSystem(bindBrokerName, properties); default: throw new IllegalStateException("Not supported file system type: " + type); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/FileSystemType.java b/fe/fe-core/src/main/java/org/apache/doris/fs/FileSystemType.java index 5ddea01174441b..018130f0c14d58 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/fs/FileSystemType.java +++ b/fe/fe-core/src/main/java/org/apache/doris/fs/FileSystemType.java @@ -22,5 +22,6 @@ public enum FileSystemType { DFS, OFS, JFS, + BROKER, FILE } diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/remote/BrokerFileSystem.java b/fe/fe-core/src/main/java/org/apache/doris/fs/remote/BrokerFileSystem.java index cb87150928869f..ef8d484bda99b8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/fs/remote/BrokerFileSystem.java +++ b/fe/fe-core/src/main/java/org/apache/doris/fs/remote/BrokerFileSystem.java @@ -24,8 +24,10 @@ import org.apache.doris.common.AnalysisException; import org.apache.doris.common.ClientPool; import org.apache.doris.common.Pair; +import org.apache.doris.common.UserException; import org.apache.doris.common.util.BrokerUtil; import org.apache.doris.datasource.property.PropertyConverter; +import org.apache.doris.fs.RemoteFiles; import org.apache.doris.fs.operations.BrokerFileOperations; import org.apache.doris.fs.operations.OpParams; import org.apache.doris.service.FrontendOptions; @@ -34,6 +36,8 @@ import org.apache.doris.thrift.TBrokerDeletePathRequest; import org.apache.doris.thrift.TBrokerFD; import org.apache.doris.thrift.TBrokerFileStatus; +import org.apache.doris.thrift.TBrokerIsSplittableRequest; +import org.apache.doris.thrift.TBrokerIsSplittableResponse; import org.apache.doris.thrift.TBrokerListPathRequest; import org.apache.doris.thrift.TBrokerListResponse; import org.apache.doris.thrift.TBrokerOperationStatus; @@ -65,6 +69,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.ArrayList; import java.util.Comparator; import java.util.List; import java.util.Map; @@ -556,6 +561,88 @@ public Status delete(String remotePath) { return Status.OK; } + @Override + public RemoteFiles listLocatedFiles(String remotePath, boolean onlyFiles, boolean recursive) throws UserException { + // get a proper broker + Pair pair = getBroker(); + if (pair == null) { + throw new UserException("failed to get broker client"); + } + TPaloBrokerService.Client client = pair.first; + TNetworkAddress address = pair.second; + + // invoke broker 'listLocatedFiles' interface + boolean needReturn = true; + try { + TBrokerListPathRequest req = new TBrokerListPathRequest(TBrokerVersion.VERSION_ONE, remotePath, + recursive, properties); + req.setOnlyFiles(onlyFiles); + TBrokerListResponse response = client.listLocatedFiles(req); + TBrokerOperationStatus operationStatus = 
response.getOpStatus(); + if (operationStatus.getStatusCode() != TBrokerOperationStatusCode.OK) { + throw new UserException("failed to listLocatedFiles, remote path: " + remotePath + ". msg: " + + operationStatus.getMessage() + ", broker: " + BrokerUtil.printBroker(name, address)); + } + List result = new ArrayList<>(); + List fileStatus = response.getFiles(); + for (TBrokerFileStatus tFile : fileStatus) { + org.apache.hadoop.fs.Path path = new org.apache.hadoop.fs.Path(tFile.path); + RemoteFile file = new RemoteFile(path.getName(), path, !tFile.isDir, tFile.isDir, tFile.size, + tFile.getBlockSize(), tFile.getModificationTime(), null /* blockLocations is null*/); + result.add(file); + } + LOG.info("finished to listLocatedFiles, remote path {}. get files: {}", remotePath, result); + return new RemoteFiles(result); + } catch (TException e) { + needReturn = false; + throw new UserException("failed to listLocatedFiles, remote path: " + + remotePath + ". msg: " + e.getMessage() + ", broker: " + BrokerUtil.printBroker(name, address)); + } finally { + if (needReturn) { + ClientPool.brokerPool.returnObject(address, client); + } else { + ClientPool.brokerPool.invalidateObject(address, client); + } + } + } + + public boolean isSplittable(String remotePath, String inputFormat) throws UserException { + // get a proper broker + Pair pair = getBroker(); + if (pair == null) { + throw new UserException("failed to get broker client"); + } + TPaloBrokerService.Client client = pair.first; + TNetworkAddress address = pair.second; + + // invoke 'isSplittable' interface + boolean needReturn = true; + try { + TBrokerIsSplittableRequest req = new TBrokerIsSplittableRequest().setVersion(TBrokerVersion.VERSION_ONE) + .setPath(remotePath).setInputFormat(inputFormat).setProperties(properties); + TBrokerIsSplittableResponse response = client.isSplittable(req); + TBrokerOperationStatus operationStatus = response.getOpStatus(); + if (operationStatus.getStatusCode() != TBrokerOperationStatusCode.OK) { + throw new UserException("failed to get path isSplittable, remote path: " + remotePath + ". msg: " + + operationStatus.getMessage() + ", broker: " + BrokerUtil.printBroker(name, address)); + } + boolean result = response.isSplittable(); + LOG.info("finished to get path isSplittable, remote path {} with format {}, isSplittable: {}", + remotePath, inputFormat, result); + return result; + } catch (TException e) { + needReturn = false; + throw new UserException("failed to get path isSplittable, remote path: " + + remotePath + ". 
msg: " + e.getMessage() + ", broker: " + BrokerUtil.printBroker(name, address)); + } finally { + if (needReturn) { + ClientPool.brokerPool.returnObject(address, client); + } else { + ClientPool.brokerPool.invalidateObject(address, client); + } + } + } + // List files in remotePath @Override public Status list(String remotePath, List result, boolean fileNameOnly) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java b/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java index 7727bad0293aec..d04dfeca359407 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java @@ -965,6 +965,9 @@ private static void handleMigration(ListMultimap tabletMet for (int i = 0; i < tabletMetaList.size(); i++) { long tabletId = tabletIds.get(i); TabletMeta tabletMeta = tabletMetaList.get(i); + if (tabletMeta == TabletInvertedIndex.NOT_EXIST_TABLET_META) { + continue; + } // always get old schema hash(as effective one) int effectiveSchemaHash = tabletMeta.getOldSchemaHash(); StorageMediaMigrationTask task = new StorageMediaMigrationTask(backendId, tabletId, diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java index 4a413158b3aaeb..a3ac99ba359375 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java @@ -69,7 +69,7 @@ import org.apache.doris.nereids.trees.UnaryNode; import org.apache.doris.nereids.trees.expressions.AggregateExpression; import org.apache.doris.nereids.trees.expressions.CTEId; -import org.apache.doris.nereids.trees.expressions.EqualTo; +import org.apache.doris.nereids.trees.expressions.EqualPredicate; import org.apache.doris.nereids.trees.expressions.ExprId; import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.NamedExpression; @@ -1114,7 +1114,7 @@ public PlanFragment visitPhysicalHashJoin( JoinType joinType = hashJoin.getJoinType(); List execEqConjuncts = hashJoin.getHashJoinConjuncts().stream() - .map(EqualTo.class::cast) + .map(EqualPredicate.class::cast) .map(e -> JoinUtils.swapEqualToForChildrenOrder(e, hashJoin.left().getOutputSet())) .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toList()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Rewriter.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Rewriter.java index d63d3b902a10d5..a3f800340b9ec5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Rewriter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Rewriter.java @@ -61,6 +61,7 @@ import org.apache.doris.nereids.rules.rewrite.EliminateNullAwareLeftAntiJoin; import org.apache.doris.nereids.rules.rewrite.EliminateOrderByConstant; import org.apache.doris.nereids.rules.rewrite.EliminateSort; +import org.apache.doris.nereids.rules.rewrite.EliminateSortUnderSubquery; import org.apache.doris.nereids.rules.rewrite.EliminateUnnecessaryProject; import org.apache.doris.nereids.rules.rewrite.EnsureProjectOnTopJoin; import org.apache.doris.nereids.rules.rewrite.ExtractAndNormalizeWindowExpression; @@ -119,6 +120,7 @@ public class Rewriter extends AbstractBatchJobExecutor { topic("Plan 
Normalization", topDown( new EliminateOrderByConstant(), + new EliminateSortUnderSubquery(), new EliminateGroupByConstant(), // MergeProjects depends on this rule new LogicalSubQueryAliasToLogicalProject(), diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/RuntimeFilterGenerator.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/RuntimeFilterGenerator.java index 0243326b106755..de13b7adb70579 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/RuntimeFilterGenerator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/RuntimeFilterGenerator.java @@ -281,11 +281,10 @@ private void pushDownRuntimeFilterCommon(PhysicalHashJoin legalTypes = Arrays.stream(TRuntimeFilterType.values()) .filter(type -> (type.getValue() & ctx.getSessionVariable().getRuntimeFilterType()) > 0) .collect(Collectors.toList()); - // TODO: some complex situation cannot be handled now, see testPushDownThroughJoin. - // we will support it in later version. - for (int i = 0; i < join.getHashJoinConjuncts().size(); i++) { + List hashJoinConjuncts = join.getEqualToConjuncts(); + for (int i = 0; i < hashJoinConjuncts.size(); i++) { EqualTo equalTo = ((EqualTo) JoinUtils.swapEqualToForChildrenOrder( - (EqualTo) join.getHashJoinConjuncts().get(i), join.left().getOutputSet())); + hashJoinConjuncts.get(i), join.left().getOutputSet())); for (TRuntimeFilterType type : legalTypes) { //bitmap rf is generated by nested loop join. if (type == TRuntimeFilterType.BITMAP) { @@ -525,7 +524,7 @@ private void analyzeRuntimeFilterPushDownIntoCTEInfos(PhysicalHashJoin conditions = curJoin.getHashJoinConjuncts(); boolean inSameEqualSet = false; - for (Expression e : conditions) { + for (EqualTo e : curJoin.getEqualToConjuncts()) { if (e instanceof EqualTo) { - SlotReference oneSide = (SlotReference) ((EqualTo) e).left(); - SlotReference anotherSide = (SlotReference) ((EqualTo) e).right(); + SlotReference oneSide = (SlotReference) e.left(); + SlotReference anotherSide = (SlotReference) e.right(); if (anotherSideSlotSet.contains(oneSide) && anotherSideSlotSet.contains(anotherSide)) { inSameEqualSet = true; break; diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/TryEliminateUninterestedPredicates.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/TryEliminateUninterestedPredicates.java index 79bd0a54bf5830..b9c9f3732e9512 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/TryEliminateUninterestedPredicates.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/TryEliminateUninterestedPredicates.java @@ -109,7 +109,8 @@ public Expression visit(Expression originExpr, Context parentContext) { public Expression visitAnd(And and, Context parentContext) { Expression left = and.left(); Context leftContext = new Context(); - Expression newLeft = this.visit(left, leftContext); + Expression newLeft = left.accept(this, leftContext); + if (leftContext.childrenContainsNonInterestedSlots) { newLeft = BooleanLiteral.TRUE; } @@ -122,7 +123,7 @@ public Expression visitAnd(And and, Context parentContext) { } Expression expr = new And(newLeft, newRight).accept(FoldConstantRuleOnFE.INSTANCE, expressionRewriteContext); parentContext.childrenContainsInterestedSlots = - rightContext.childrenContainsInterestedSlots || leftContext.childrenContainsInterestedSlots; + rightContext.childrenContainsInterestedSlots || 
leftContext.childrenContainsInterestedSlots; return expr; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/EliminateOuterJoin.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/EliminateOuterJoin.java index 83cc37ed0b3d18..c2dcafbee435fb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/EliminateOuterJoin.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/EliminateOuterJoin.java @@ -19,17 +19,23 @@ import org.apache.doris.nereids.rules.Rule; import org.apache.doris.nereids.rules.RuleType; +import org.apache.doris.nereids.trees.expressions.EqualPredicate; import org.apache.doris.nereids.trees.expressions.Expression; +import org.apache.doris.nereids.trees.expressions.IsNull; +import org.apache.doris.nereids.trees.expressions.Not; import org.apache.doris.nereids.trees.expressions.Slot; import org.apache.doris.nereids.trees.plans.JoinType; import org.apache.doris.nereids.trees.plans.Plan; import org.apache.doris.nereids.trees.plans.logical.LogicalJoin; +import org.apache.doris.nereids.util.JoinUtils; import org.apache.doris.nereids.util.TypeUtils; import org.apache.doris.nereids.util.Utils; import com.google.common.collect.ImmutableSet; import com.google.common.collect.ImmutableSet.Builder; +import com.google.common.collect.Sets; + import java.util.Collection; import java.util.HashSet; import java.util.Optional; import java.util.Set; @@ -63,6 +69,45 @@ public Rule build() { } JoinType newJoinType = tryEliminateOuterJoin(join.getJoinType(), canFilterLeftNull, canFilterRightNull); + Set conjuncts = Sets.newHashSet(); + conjuncts.addAll(filter.getConjuncts()); + boolean conjunctsChanged = false; + if (!notNullSlots.isEmpty()) { + for (Slot slot : notNullSlots) { + Not isNotNull = new Not(new IsNull(slot)); + isNotNull.isGeneratedIsNotNull = true; + conjunctsChanged |= conjuncts.add(isNotNull); + } + } + if (newJoinType.isInnerJoin()) { + /* + * for example: (A left join B on A.a=B.b) join C on B.x=C.x + * inner join condition B.x=C.x implies 'B.x is not null', + * by which the left outer join can be eliminated. Finally, the join is transformed to + * (A join B on A.a=B.b) join C on B.x=C.x. + * This elimination can be processed recursively.
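+ * To make the recursion concrete (an editor-added illustration, not from the + * original comment): in ((A left join B on A.a=B.b) left join C on B.y=C.y) join D on C.z=D.z, + * C.z=D.z first implies 'C.z is not null' and turns the second left join into + * an inner join; that join's condition B.y=C.y then implies 'B.y is not null', + * which eliminates the first left join as well.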
+ * + * TODO: is_not_null can also be inferred from A < B and so on + */ + conjunctsChanged |= join.getHashJoinConjuncts().stream() + .map(EqualPredicate.class::cast) + .map(equalTo -> JoinUtils.swapEqualToForChildrenOrder(equalTo, join.left().getOutputSet())) + .anyMatch(equalTo -> createIsNotNullIfNecessary(equalTo, conjuncts)); + + JoinUtils.JoinSlotCoverageChecker checker = new JoinUtils.JoinSlotCoverageChecker( + join.left().getOutput(), + join.right().getOutput()); + conjunctsChanged |= join.getOtherJoinConjuncts().stream() + .filter(EqualPredicate.class::isInstance) + .filter(equalTo -> checker.isHashJoinCondition((EqualPredicate) equalTo)) + .map(equalTo -> JoinUtils.swapEqualToForChildrenOrder((EqualPredicate) equalTo, + join.left().getOutputSet())) + .anyMatch(equalTo -> createIsNotNullIfNecessary(equalTo, conjuncts)); + } + if (conjunctsChanged) { + return filter.withConjuncts(conjuncts.stream().collect(ImmutableSet.toImmutableSet())) + .withChildren(join.withJoinType(newJoinType)); + } return filter.withChildren(join.withJoinType(newJoinType)); }).toRule(RuleType.ELIMINATE_OUTER_JOIN); } @@ -85,4 +130,19 @@ private JoinType tryEliminateOuterJoin(JoinType joinType, boolean canFilterLeftN } return joinType; } + + private boolean createIsNotNullIfNecessary(EqualPredicate swapedEqualTo, Collection container) { + boolean containerChanged = false; + if (swapedEqualTo.left().nullable()) { + Not not = new Not(new IsNull(swapedEqualTo.left())); + not.isGeneratedIsNotNull = true; + containerChanged |= container.add(not); + } + if (swapedEqualTo.right().nullable()) { + Not not = new Not(new IsNull(swapedEqualTo.right())); + not.isGeneratedIsNotNull = true; + containerChanged |= container.add(not); + } + return containerChanged; + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/EliminateSortUnderSubquery.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/EliminateSortUnderSubquery.java new file mode 100644 index 00000000000000..298b632204e031 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/EliminateSortUnderSubquery.java @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.doris.nereids.rules.rewrite; + +import org.apache.doris.nereids.rules.Rule; +import org.apache.doris.nereids.rules.RuleType; + +/** + * Eliminate a sort that sits directly under a subquery alias: the ordering of a + * derived table is not guaranteed to the outer query, so the sort is redundant, e.g. + * SELECT COUNT(*) FROM (SELECT * FROM t ORDER BY id) v -> SELECT COUNT(*) FROM (SELECT * FROM t) v + */ +public class EliminateSortUnderSubquery extends OneRewriteRuleFactory { + @Override + public Rule build() { + return logicalSubQueryAlias(logicalSort()) + .then(subq -> subq.withChildren(subq.child().child(0))) + .toRule(RuleType.ELIMINATE_ORDER_BY_CONSTANT); + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/ExtractAndNormalizeWindowExpression.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/ExtractAndNormalizeWindowExpression.java index 5181c978edcde3..f45f7a287e8e9d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/ExtractAndNormalizeWindowExpression.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/ExtractAndNormalizeWindowExpression.java @@ -19,6 +19,7 @@ import org.apache.doris.nereids.rules.Rule; import org.apache.doris.nereids.rules.RuleType; +import org.apache.doris.nereids.rules.rewrite.NormalizeToSlot.NormalizeToSlotContext; import org.apache.doris.nereids.trees.expressions.Alias; import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.NamedExpression; @@ -34,6 +35,7 @@ import java.util.List; import java.util.Set; +import java.util.stream.Collectors; import java.util.stream.Stream; /** @@ -44,7 +46,22 @@ public class ExtractAndNormalizeWindowExpression extends OneRewriteRuleFactory i @Override public Rule build() { return logicalProject().when(project -> containsWindowExpression(project.getProjects())).then(project -> { - List outputs = project.getProjects(); + List outputs = + ExpressionUtils.rewriteDownShortCircuit(project.getProjects(), output -> { + if (output instanceof WindowExpression) { + // remove literal partition by and order by keys + WindowExpression windowExpression = (WindowExpression) output; + return windowExpression.withPartitionKeysOrderKeys( + windowExpression.getPartitionKeys().stream() + .filter(expression -> !expression.isConstant()) + .collect(Collectors.toList()), + windowExpression.getOrderKeys().stream() + .filter(orderExpression -> !orderExpression + .getOrderKey().getExpr().isConstant()) + .collect(Collectors.toList())); + } + return output; + }); // 1.
handle bottom projects Set existedAlias = ExpressionUtils.collect(outputs, Alias.class::isInstance); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PushdownExpressionsInHashCondition.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PushdownExpressionsInHashCondition.java index 05da591526cd83..df7acb4553c6ae 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PushdownExpressionsInHashCondition.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PushdownExpressionsInHashCondition.java @@ -20,7 +20,7 @@ import org.apache.doris.nereids.rules.Rule; import org.apache.doris.nereids.rules.RuleType; import org.apache.doris.nereids.trees.expressions.Alias; -import org.apache.doris.nereids.trees.expressions.EqualTo; +import org.apache.doris.nereids.trees.expressions.EqualPredicate; import org.apache.doris.nereids.trees.expressions.ExprId; import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.NamedExpression; @@ -77,11 +77,10 @@ public Rule build() { Set rightProjectExprs = Sets.newHashSet(); Map exprReplaceMap = Maps.newHashMap(); join.getHashJoinConjuncts().forEach(conjunct -> { - Preconditions.checkArgument(conjunct instanceof EqualTo); + Preconditions.checkArgument(conjunct instanceof EqualPredicate); // sometimes: t1 join t2 on t2.a + 1 = t1.a + 2, so check the situation, but actually it // doesn't swap the two sides. - conjunct = JoinUtils.swapEqualToForChildrenOrder( - (EqualTo) conjunct, join.left().getOutputSet()); + conjunct = JoinUtils.swapEqualToForChildrenOrder((EqualPredicate) conjunct, join.left().getOutputSet()); generateReplaceMapAndProjectExprs(conjunct.child(0), exprReplaceMap, leftProjectExprs); generateReplaceMapAndProjectExprs(conjunct.child(1), exprReplaceMap, rightProjectExprs); }); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/mv/AbstractSelectMaterializedIndexRule.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/mv/AbstractSelectMaterializedIndexRule.java index 012dec4c91cf49..c1550cb5bd5d5b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/mv/AbstractSelectMaterializedIndexRule.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/mv/AbstractSelectMaterializedIndexRule.java @@ -27,13 +27,12 @@ import org.apache.doris.nereids.trees.expressions.CaseWhen; import org.apache.doris.nereids.trees.expressions.Cast; import org.apache.doris.nereids.trees.expressions.ComparisonPredicate; -import org.apache.doris.nereids.trees.expressions.EqualTo; +import org.apache.doris.nereids.trees.expressions.EqualPredicate; import org.apache.doris.nereids.trees.expressions.ExprId; import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.InPredicate; import org.apache.doris.nereids.trees.expressions.IsNull; import org.apache.doris.nereids.trees.expressions.NamedExpression; -import org.apache.doris.nereids.trees.expressions.NullSafeEqual; import org.apache.doris.nereids.trees.expressions.Slot; import org.apache.doris.nereids.trees.expressions.SlotReference; import org.apache.doris.nereids.trees.expressions.WhenClause; @@ -306,7 +305,7 @@ public PrefixIndexCheckResult visitInPredicate(InPredicate in, Map context) { - if (cp instanceof EqualTo || cp instanceof NullSafeEqual) { + if (cp instanceof EqualPredicate) { return check(cp, context, PrefixIndexCheckResult::createEqual); } else { return 
check(cp, context, PrefixIndexCheckResult::createNonEqual); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/FilterEstimation.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/FilterEstimation.java index f06c9d1cc4f4ee..a412ff375fd63e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/FilterEstimation.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/FilterEstimation.java @@ -23,6 +23,7 @@ import org.apache.doris.nereids.trees.expressions.And; import org.apache.doris.nereids.trees.expressions.ComparisonPredicate; import org.apache.doris.nereids.trees.expressions.CompoundPredicate; +import org.apache.doris.nereids.trees.expressions.EqualPredicate; import org.apache.doris.nereids.trees.expressions.EqualTo; import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.GreaterThan; @@ -33,7 +34,6 @@ import org.apache.doris.nereids.trees.expressions.LessThanEqual; import org.apache.doris.nereids.trees.expressions.Like; import org.apache.doris.nereids.trees.expressions.Not; -import org.apache.doris.nereids.trees.expressions.NullSafeEqual; import org.apache.doris.nereids.trees.expressions.Or; import org.apache.doris.nereids.trees.expressions.Slot; import org.apache.doris.nereids.trees.expressions.SlotReference; @@ -210,7 +210,7 @@ private Statistics calculateWhenLiteralRight(ComparisonPredicate cp, return context.statistics.withSel(DEFAULT_INEQUALITY_COEFFICIENT); } - if (cp instanceof EqualTo || cp instanceof NullSafeEqual) { + if (cp instanceof EqualPredicate) { return estimateEqualTo(cp, statsForLeft, statsForRight, context); } else { if (cp instanceof LessThan || cp instanceof LessThanEqual) { @@ -255,7 +255,7 @@ private Statistics calculateWhenBothColumn(ComparisonPredicate cp, EstimationCon ColumnStatistic statsForLeft, ColumnStatistic statsForRight) { Expression left = cp.left(); Expression right = cp.right(); - if (cp instanceof EqualTo || cp instanceof NullSafeEqual) { + if (cp instanceof EqualPredicate) { return estimateColumnEqualToColumn(left, statsForLeft, right, statsForRight, context); } if (cp instanceof GreaterThan || cp instanceof GreaterThanEqual) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/JoinEstimation.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/JoinEstimation.java index 800886c177f242..f9d25cab171c9e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/JoinEstimation.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/JoinEstimation.java @@ -19,7 +19,7 @@ import org.apache.doris.nereids.exceptions.AnalysisException; import org.apache.doris.nereids.trees.expressions.Cast; -import org.apache.doris.nereids.trees.expressions.EqualTo; +import org.apache.doris.nereids.trees.expressions.EqualPredicate; import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.Slot; import org.apache.doris.nereids.trees.plans.JoinType; @@ -45,14 +45,14 @@ public class JoinEstimation { private static double DEFAULT_ANTI_JOIN_SELECTIVITY_COEFFICIENT = 0.3; - private static EqualTo normalizeHashJoinCondition(EqualTo equalTo, Statistics leftStats, Statistics rightStats) { - boolean changeOrder = equalTo.left().getInputSlots().stream().anyMatch( - slot -> rightStats.findColumnStatistics(slot) != null - ); + private static EqualPredicate normalizeHashJoinCondition(EqualPredicate equal, Statistics leftStats, + Statistics rightStats) { + boolean changeOrder = 
equal.left().getInputSlots().stream() + .anyMatch(slot -> rightStats.findColumnStatistics(slot) != null); if (changeOrder) { - return new EqualTo(equalTo.right(), equalTo.left()); + return equal.commute(); } else { - return equalTo; + return equal; } } @@ -81,18 +81,18 @@ private static Statistics estimateHashJoin(Statistics leftStats, Statistics righ * In order to avoid error propagation, for unTrustEquations, we only use the biggest selectivity. */ List unTrustEqualRatio = Lists.newArrayList(); - List unTrustableCondition = Lists.newArrayList(); + List unTrustableCondition = Lists.newArrayList(); boolean leftBigger = leftStats.getRowCount() > rightStats.getRowCount(); double rightStatsRowCount = StatsMathUtil.nonZeroDivisor(rightStats.getRowCount()); double leftStatsRowCount = StatsMathUtil.nonZeroDivisor(leftStats.getRowCount()); - List trustableConditions = join.getHashJoinConjuncts().stream() - .map(expression -> (EqualTo) expression) + List trustableConditions = join.getHashJoinConjuncts().stream() + .map(expression -> (EqualPredicate) expression) .filter( expression -> { // since ndv is not accurate, if ndv/rowcount < almostUniqueThreshold, // this column is regarded as unique. double almostUniqueThreshold = 0.9; - EqualTo equal = normalizeHashJoinCondition(expression, leftStats, rightStats); + EqualPredicate equal = normalizeHashJoinCondition(expression, leftStats, rightStats); ColumnStatistic eqLeftColStats = ExpressionEstimation.estimate(equal.left(), leftStats); ColumnStatistic eqRightColStats = ExpressionEstimation.estimate(equal.right(), rightStats); boolean trustable = eqRightColStats.ndv / rightStatsRowCount > almostUniqueThreshold @@ -189,7 +189,7 @@ private static double estimateJoinConditionSel(Statistics crossJoinStats, Expres } private static double estimateSemiOrAntiRowCountBySlotsEqual(Statistics leftStats, - Statistics rightStats, Join join, EqualTo equalTo) { + Statistics rightStats, Join join, EqualPredicate equalTo) { Expression eqLeft = equalTo.left(); Expression eqRight = equalTo.right(); ColumnStatistic probColStats = leftStats.findColumnStatistics(eqLeft); @@ -246,7 +246,7 @@ private static Statistics estimateSemiOrAnti(Statistics leftStats, Statistics ri double rowCount = Double.POSITIVE_INFINITY; for (Expression conjunct : join.getHashJoinConjuncts()) { double eqRowCount = estimateSemiOrAntiRowCountBySlotsEqual(leftStats, rightStats, - join, (EqualTo) conjunct); + join, (EqualPredicate) conjunct); if (rowCount > eqRowCount) { rowCount = eqRowCount; } @@ -321,7 +321,7 @@ public static Statistics estimate(Statistics leftStats, Statistics rightStats, J private static Statistics updateJoinResultStatsByHashJoinCondition(Statistics innerStats, Join join) { Map updatedCols = new HashMap<>(); for (Expression expr : join.getHashJoinConjuncts()) { - EqualTo equalTo = (EqualTo) expr; + EqualPredicate equalTo = (EqualPredicate) expr; ColumnStatistic leftColStats = ExpressionEstimation.estimate(equalTo.left(), innerStats); ColumnStatistic rightColStats = ExpressionEstimation.estimate(equalTo.right(), innerStats); double minNdv = Math.min(leftColStats.ndv, rightColStats.ndv); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/EqualPredicate.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/EqualPredicate.java new file mode 100644 index 00000000000000..3f61bd3cf621a5 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/EqualPredicate.java @@ -0,0 +1,36 @@ +// Licensed to the Apache 
Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.trees.expressions; + +import java.util.List; + +/** + * EqualPredicate + */ +public abstract class EqualPredicate extends ComparisonPredicate { + + protected EqualPredicate(List children, String symbol) { + super(children, symbol); + } + + @Override + public EqualPredicate commute() { + return null; + } +} + diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/EqualTo.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/EqualTo.java index 0fa23a57e0a310..1e72a006057462 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/EqualTo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/EqualTo.java @@ -29,7 +29,7 @@ /** * Equal to expression: a = b. */ -public class EqualTo extends ComparisonPredicate implements PropagateNullable { +public class EqualTo extends EqualPredicate implements PropagateNullable { public EqualTo(Expression left, Expression right) { super(ImmutableList.of(left, right), "="); @@ -60,7 +60,7 @@ public R accept(ExpressionVisitor visitor, C context) { } @Override - public ComparisonPredicate commute() { + public EqualTo commute() { return new EqualTo(right(), left()); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/NullSafeEqual.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/NullSafeEqual.java index c2b63aebbd793a..48d05364fa3441 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/NullSafeEqual.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/NullSafeEqual.java @@ -29,13 +29,7 @@ * Null safe equal expression: a <=> b. * Unlike normal equal to expression, null <=> null is true. */ -public class NullSafeEqual extends ComparisonPredicate implements AlwaysNotNullable { - /** - * Constructor of Null Safe Equal ComparisonPredicate. 
- * - * @param left left child of Null Safe Equal - * @param right right child of Null Safe Equal - */ +public class NullSafeEqual extends EqualPredicate implements AlwaysNotNullable { public NullSafeEqual(Expression left, Expression right) { super(ImmutableList.of(left, right), "<=>"); } @@ -61,8 +55,7 @@ public NullSafeEqual withChildren(List children) { } @Override - public ComparisonPredicate commute() { + public NullSafeEqual commute() { return new NullSafeEqual(right(), left()); } - } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/AbstractPhysicalJoin.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/AbstractPhysicalJoin.java index f67123522c3daf..a39634917aae61 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/AbstractPhysicalJoin.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/AbstractPhysicalJoin.java @@ -20,6 +20,7 @@ import org.apache.doris.nereids.memo.GroupExpression; import org.apache.doris.nereids.properties.LogicalProperties; import org.apache.doris.nereids.properties.PhysicalProperties; +import org.apache.doris.nereids.trees.expressions.EqualTo; import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.MarkJoinSlotReference; import org.apache.doris.nereids.trees.expressions.Slot; @@ -41,6 +42,7 @@ import java.util.List; import java.util.Objects; import java.util.Optional; +import java.util.stream.Collectors; /** * Abstract class for all physical join node. @@ -109,6 +111,11 @@ public List getHashJoinConjuncts() { return hashJoinConjuncts; } + public List getEqualToConjuncts() { + return hashJoinConjuncts.stream().filter(EqualTo.class::isInstance).map(EqualTo.class::cast) + .collect(Collectors.toList()); + } + public boolean isShouldTranslateOutput() { return shouldTranslateOutput; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/util/JoinUtils.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/util/JoinUtils.java index eda7d2e6ad1fd3..25f84c096c8e07 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/util/JoinUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/util/JoinUtils.java @@ -24,7 +24,7 @@ import org.apache.doris.nereids.properties.DistributionSpecHash; import org.apache.doris.nereids.properties.DistributionSpecHash.ShuffleType; import org.apache.doris.nereids.properties.DistributionSpecReplicated; -import org.apache.doris.nereids.trees.expressions.EqualTo; +import org.apache.doris.nereids.trees.expressions.EqualPredicate; import org.apache.doris.nereids.trees.expressions.ExprId; import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.Not; @@ -61,31 +61,18 @@ public static boolean couldBroadcast(Join join) { return !(join.getJoinType().isRightJoin() || join.getJoinType().isFullOuterJoin()); } - private static final class JoinSlotCoverageChecker { + /** + * JoinSlotCoverageChecker + */ + public static final class JoinSlotCoverageChecker { Set leftExprIds; Set rightExprIds; - JoinSlotCoverageChecker(List left, List right) { + public JoinSlotCoverageChecker(List left, List right) { leftExprIds = left.stream().map(Slot::getExprId).collect(Collectors.toSet()); rightExprIds = right.stream().map(Slot::getExprId).collect(Collectors.toSet()); } - JoinSlotCoverageChecker(Set left, Set right) { - leftExprIds = left; - rightExprIds = right; - } - - /** - * PushDownExpressionInHashConjuncts 
ensure the "slots" is only one slot. - */ - boolean isCoveredByLeftSlots(ExprId slot) { - return leftExprIds.contains(slot); - } - - boolean isCoveredByRightSlots(ExprId slot) { - return rightExprIds.contains(slot); - } - /** * consider following cases: * 1# A=1 => not for hash table @@ -94,25 +81,20 @@ boolean isCoveredByRightSlots(ExprId slot) { * 4# t1.a=t2.a or t1.b=t2.b not for hash table * 5# t1.a > 1 not for hash table * - * @param equalTo a conjunct in on clause condition + * @param equal a conjunct in on clause condition * @return true if the equal can be used as hash join condition */ - boolean isHashJoinCondition(EqualTo equalTo) { - Set equalLeft = equalTo.left().collect(Slot.class::isInstance); - if (equalLeft.isEmpty()) { + public boolean isHashJoinCondition(EqualPredicate equal) { + Set equalLeftExprIds = equal.left().getInputSlotExprIds(); + if (equalLeftExprIds.isEmpty()) { return false; } - Set equalRight = equalTo.right().collect(Slot.class::isInstance); - if (equalRight.isEmpty()) { + Set equalRightExprIds = equal.right().getInputSlotExprIds(); + if (equalRightExprIds.isEmpty()) { return false; } - List equalLeftExprIds = equalLeft.stream() - .map(Slot::getExprId).collect(Collectors.toList()); - - List equalRightExprIds = equalRight.stream() - .map(Slot::getExprId).collect(Collectors.toList()); return leftExprIds.containsAll(equalLeftExprIds) && rightExprIds.containsAll(equalRightExprIds) || leftExprIds.containsAll(equalRightExprIds) && rightExprIds.containsAll(equalLeftExprIds); } @@ -129,9 +111,8 @@ boolean isHashJoinCondition(EqualTo equalTo) { public static Pair, List> extractExpressionForHashTable(List leftSlots, List rightSlots, List onConditions) { JoinSlotCoverageChecker checker = new JoinSlotCoverageChecker(leftSlots, rightSlots); - Map> mapper = onConditions.stream() - .collect(Collectors.groupingBy( - expr -> (expr instanceof EqualTo) && checker.isHashJoinCondition((EqualTo) expr))); + Map> mapper = onConditions.stream().collect(Collectors.groupingBy( + expr -> (expr instanceof EqualPredicate) && checker.isHashJoinCondition((EqualPredicate) expr))); return Pair.of( mapper.getOrDefault(true, ImmutableList.of()), mapper.getOrDefault(false, ImmutableList.of()) @@ -187,7 +168,7 @@ public static boolean shouldNestedLoopJoin(JoinType joinType, List h * The left child of origin predicate is t2.id and the right child of origin predicate is t1.id. * In this situation, the children of predicate need to be swap => t1.id=t2.id. 
*/ - public static Expression swapEqualToForChildrenOrder(EqualTo equalTo, Set leftOutput) { + public static EqualPredicate swapEqualToForChildrenOrder(EqualPredicate equalTo, Set leftOutput) { if (leftOutput.containsAll(equalTo.left().getInputSlots())) { return equalTo; } else { diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/AnalyticPlanner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/AnalyticPlanner.java index 433fe76fd9ede1..f3a865b051f39a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/AnalyticPlanner.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/AnalyticPlanner.java @@ -313,9 +313,9 @@ private SortInfo createSortInfo( SlotDescriptor sortSlotDesc = analyzer.getDescTbl().addSlotDescriptor(sortTupleDesc); if (inputSlotDesc.getColumn() != null) { sortSlotDesc.setColumn(inputSlotDesc.getColumn()); - } else { - sortSlotDesc.setType(inputSlotDesc.getType()); } + // always set type as inputSlotDesc's type + sortSlotDesc.setType(inputSlotDesc.getType()); // all output slots need to be materialized sortSlotDesc.setIsMaterialized(true); sortSlotDesc.setIsNullable(inputSlotDesc.getIsNullable()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/external/HiveScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/external/HiveScanNode.java index 1ba77fa5f9c09f..d41418104871a9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/external/HiveScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/external/HiveScanNode.java @@ -196,8 +196,14 @@ protected List getSplits() throws UserException { HiveMetaStoreCache cache = Env.getCurrentEnv().getExtMetaCacheMgr() .getMetaStoreCache((HMSExternalCatalog) hmsTable.getCatalog()); boolean useSelfSplitter = hmsTable.getCatalog().useSelfSplitter(); + String bindBrokerName = hmsTable.getCatalog().bindBrokerName(); + if (bindBrokerName != null && useSelfSplitter == false) { + // useSelfSplitter must be true if bindBrokerName is set. 
+ throw new UserException(HMSExternalCatalog.ENABLE_SELF_SPLITTER + " should be true if " + + HMSExternalCatalog.BIND_BROKER_NAME + " is set"); + } List allFiles = Lists.newArrayList(); - getFileSplitByPartitions(cache, getPartitions(), allFiles, useSelfSplitter); + getFileSplitByPartitions(cache, getPartitions(), allFiles, useSelfSplitter, bindBrokerName); LOG.debug("get #{} files for table: {}.{}, cost: {} ms", allFiles.size(), hmsTable.getDbName(), hmsTable.getName(), (System.currentTimeMillis() - start)); return allFiles; @@ -210,12 +216,13 @@ protected List getSplits() throws UserException { } private void getFileSplitByPartitions(HiveMetaStoreCache cache, List partitions, - List allFiles, boolean useSelfSplitter) throws IOException { + List allFiles, boolean useSelfSplitter, + String bindBrokerName) throws IOException { List fileCaches; if (hiveTransaction != null) { - fileCaches = getFileSplitByTransaction(cache, partitions); + fileCaches = getFileSplitByTransaction(cache, partitions, bindBrokerName); } else { - fileCaches = cache.getFilesByPartitionsWithCache(partitions, useSelfSplitter); + fileCaches = cache.getFilesByPartitionsWithCache(partitions, useSelfSplitter, bindBrokerName); } if (ConnectContext.get().getExecutor() != null) { ConnectContext.get().getExecutor().getSummaryProfile().setGetPartitionFilesFinishTime(); @@ -287,7 +294,8 @@ private List selectFiles(List return fileList.subList(0, index); } - private List getFileSplitByTransaction(HiveMetaStoreCache cache, List partitions) { + private List getFileSplitByTransaction(HiveMetaStoreCache cache, List partitions, + String bindBrokerName) { for (HivePartition partition : partitions) { if (partition.getPartitionValues() == null || partition.getPartitionValues().isEmpty()) { // this is unpartitioned table. 
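// Editor's aside, a hedged sketch rather than part of the patch: the HiveScanNode
// hunks around this point encode two rules for a bound broker -- binding a broker
// requires self-splitting, and (in the getLocationType hunk below) it forces
// broker-based file access. The names here are hypothetical stand-ins for the
// Doris types, kept minimal to show just the two invariants.
final class BrokerBindingSketch {
    enum LocationType { FILE_BROKER, FILE_HDFS }

    // Mirrors the check added to getSplits(): a bound broker without self-splitting
    // is a configuration error, reported before any split is produced.
    static void validate(String bindBrokerName, boolean useSelfSplitter) {
        if (bindBrokerName != null && !useSelfSplitter) {
            throw new IllegalStateException("self splitter must be enabled when a broker is bound");
        }
    }

    // Mirrors getLocationType(String location): a bound broker wins over the path scheme.
    static LocationType locationType(String bindBrokerName) {
        return bindBrokerName != null ? LocationType.FILE_BROKER : LocationType.FILE_HDFS;
    }
}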
@@ -297,7 +305,8 @@ private List getFileSplitByTransaction(HiveMetaStoreCache cache, } ValidWriteIdList validWriteIds = hiveTransaction.getValidWriteIds( ((HMSExternalCatalog) hmsTable.getCatalog()).getClient()); - return cache.getFilesByTransaction(partitions, validWriteIds, hiveTransaction.isFullAcid(), hmsTable.getId()); + return cache.getFilesByTransaction(partitions, validWriteIds, + hiveTransaction.isFullAcid(), hmsTable.getId(), bindBrokerName); } @Override @@ -319,6 +328,10 @@ protected TFileType getLocationType() throws UserException { @Override protected TFileType getLocationType(String location) throws UserException { + String bindBrokerName = hmsTable.getCatalog().bindBrokerName(); + if (bindBrokerName != null) { + return TFileType.FILE_BROKER; + } return getTFileType(location).orElseThrow(() -> new DdlException("Unknown file location " + location + " for hms table " + hmsTable.getName())); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/external/jdbc/JdbcScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/external/jdbc/JdbcScanNode.java index 4eccaff47741ab..23a44ed643263e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/external/jdbc/JdbcScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/external/jdbc/JdbcScanNode.java @@ -138,7 +138,7 @@ private void createJdbcFilters() { List pushDownConjuncts = collectConjunctsToPushDown(conjunctsList, errors); for (Expr individualConjunct : pushDownConjuncts) { - String filter = conjunctExprToString(jdbcType, individualConjunct, tbl); + String filter = conjunctExprToString(jdbcType, individualConjunct); filters.add(filter); conjuncts.remove(individualConjunct); } @@ -169,9 +169,9 @@ private void createJdbcColumns() { continue; } Column col = slot.getColumn(); - columns.add(tbl.getProperRealColumnName(jdbcType, col.getName())); + columns.add(JdbcTable.databaseProperName(jdbcType, col.getName())); } - if (columns.isEmpty()) { + if (0 == columns.size()) { columns.add("*"); } } @@ -324,12 +324,12 @@ private static boolean containsFunctionCallExpr(Expr expr) { return !fnExprList.isEmpty(); } - public static String conjunctExprToString(TOdbcTableType tableType, Expr expr, JdbcTable tbl) { + public static String conjunctExprToString(TOdbcTableType tableType, Expr expr) { if (expr instanceof CompoundPredicate) { StringBuilder result = new StringBuilder(); CompoundPredicate compoundPredicate = (CompoundPredicate) expr; for (Expr child : compoundPredicate.getChildren()) { - result.append(conjunctExprToString(tableType, child, tbl)); + result.append(conjunctExprToString(tableType, child)); result.append(" ").append(compoundPredicate.getOp().toString()).append(" "); } // Remove the last operator @@ -357,11 +357,7 @@ public static String conjunctExprToString(TOdbcTableType tableType, Expr expr, J ArrayList children = expr.getChildren(); String filter; if (children.get(0) instanceof SlotRef) { - if (tbl != null) { - filter = tbl.getProperRealColumnName(tableType, children.get(0).toMySql()); - } else { - filter = JdbcTable.databaseProperName(tableType, children.get(0).toMySql()); - } + filter = JdbcTable.databaseProperName(tableType, children.get(0).toMySql()); } else { filter = children.get(0).toMySql(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/external/odbc/OdbcScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/external/odbc/OdbcScanNode.java index 68950ebab833cc..bf4e835e4f1035 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/planner/external/odbc/OdbcScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/external/odbc/OdbcScanNode.java @@ -184,7 +184,7 @@ private void createOdbcFilters(Analyzer analyzer) { ArrayList odbcConjuncts = Expr.cloneList(conjuncts, sMap); for (Expr p : odbcConjuncts) { if (shouldPushDownConjunct(odbcType, p)) { - String filter = JdbcScanNode.conjunctExprToString(odbcType, p, null); + String filter = JdbcScanNode.conjunctExprToString(odbcType, p); filters.add(filter); conjuncts.remove(p); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java index 792d00bd63d96a..938282c7644295 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java +++ b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java @@ -2610,6 +2610,15 @@ public TGetBinlogResult getBinlog(TGetBinlogRequest request) throws TException { TGetBinlogResult result = new TGetBinlogResult(); TStatus status = new TStatus(TStatusCode.OK); result.setStatus(status); + + if (!Env.getCurrentEnv().isMaster()) { + status.setStatusCode(TStatusCode.NOT_MASTER); + status.addToErrorMsgs(NOT_MASTER_ERR_MSG); + result.setMasterAddress(getMasterAddress()); + LOG.error("failed to get binlog: {}", NOT_MASTER_ERR_MSG); + return result; + } + try { result = getBinlogImpl(request, clientAddr); } catch (UserException e) { @@ -2955,6 +2964,14 @@ public TGetBinlogLagResult getBinlogLag(TGetBinlogRequest request) throws TExcep TStatus status = new TStatus(TStatusCode.OK); result.setStatus(status); + if (!Env.getCurrentEnv().isMaster()) { + status.setStatusCode(TStatusCode.NOT_MASTER); + status.addToErrorMsgs(NOT_MASTER_ERR_MSG); + result.setMasterAddress(getMasterAddress()); + LOG.error("failed to get binlog lag: {}", NOT_MASTER_ERR_MSG); + return result; + } + try { result = getBinlogLagImpl(request, clientAddr); } catch (UserException e) { @@ -3063,6 +3080,15 @@ public TGetMetaResult getMeta(TGetMetaRequest request) throws TException { TGetMetaResult result = new TGetMetaResult(); TStatus status = new TStatus(TStatusCode.OK); result.setStatus(status); + + if (!Env.getCurrentEnv().isMaster()) { + status.setStatusCode(TStatusCode.NOT_MASTER); + status.addToErrorMsgs(NOT_MASTER_ERR_MSG); + result.setMasterAddress(getMasterAddress()); + LOG.error("failed to get meta: {}", NOT_MASTER_ERR_MSG); + return result; + } + try { result = getMetaImpl(request, clientAddr); } catch (UserException e) { @@ -3163,6 +3189,15 @@ public TGetBackendMetaResult getBackendMeta(TGetBackendMetaRequest request) thro TGetBackendMetaResult result = new TGetBackendMetaResult(); TStatus status = new TStatus(TStatusCode.OK); result.setStatus(status); + + if (!Env.getCurrentEnv().isMaster()) { + status.setStatusCode(TStatusCode.NOT_MASTER); + status.addToErrorMsgs(NOT_MASTER_ERR_MSG); + result.setMasterAddress(getMasterAddress()); + LOG.error("failed to get backend meta: {}", NOT_MASTER_ERR_MSG); + return result; + } + try { result = getBackendMetaImpl(request, clientAddr); } catch (UserException e) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/util/StatisticsUtil.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/util/StatisticsUtil.java index 927e556c91af67..f2c77026312d6d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/util/StatisticsUtil.java +++ 
b/fe/fe-core/src/main/java/org/apache/doris/statistics/util/StatisticsUtil.java @@ -702,7 +702,8 @@ public static List getFilesForPartitions( table.getRemoteTable().getSd().getLocation(), null)); } // Get files for all partitions. - return cache.getFilesByPartitionsWithoutCache(hivePartitions, true); + String bindBrokerName = table.getCatalog().bindBrokerName(); + return cache.getFilesByPartitionsWithoutCache(hivePartitions, true, bindBrokerName); } /** diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/EliminateSortTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/EliminateSortTest.java index fc88287d1092a8..cc039cbabb195d 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/EliminateSortTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/EliminateSortTest.java @@ -26,7 +26,7 @@ /** * column prune ut. */ -public class EliminateSortTest extends TestWithFeService implements MemoPatternMatchSupported { +class EliminateSortTest extends TestWithFeService implements MemoPatternMatchSupported { @Override protected void runBeforeAll() throws Exception { createDatabase("test"); @@ -37,7 +37,7 @@ protected void runBeforeAll() throws Exception { } @Test - public void test() { + void test() { PlanChecker.from(connectContext) .analyze("select * from student order by id") .rewrite() @@ -47,4 +47,27 @@ public void test() { .rewrite() .nonMatch(logicalSort()); } + + @Test + void testSortLimit() { + PlanChecker.from(connectContext) + .analyze("select count(*) from (select * from student order by id) t limit 1") + .rewrite() + .nonMatch(logicalTopN()); + PlanChecker.from(connectContext) + .analyze("select count(*) from (select * from student order by id limit 1) t") + .rewrite() + .matches(logicalTopN()); + + PlanChecker.from(connectContext) + .analyze("select count(*) from " + + "(select * from student order by id limit 1) t1 left join student t2 on t1.id = t2.id") + .rewrite() + .matches(logicalTopN()); + PlanChecker.from(connectContext) + .analyze("select count(*) from " + + "(select * from student order by id) t1 left join student t2 on t1.id = t2.id limit 1") + .rewrite() + .nonMatch(logicalTopN()); + } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/PruneOlapScanPartitionTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/PruneOlapScanPartitionTest.java index 85fde57b517fa2..de54a05bc5e8c8 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/PruneOlapScanPartitionTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/PruneOlapScanPartitionTest.java @@ -228,6 +228,8 @@ public void prunePartitionWithOrPredicate() { public void canNotPruneComplexPredicate() { test("test_range_parts", "(part = 10) or (part + id = 1)", 4); test("test_range_parts", "(part + id = 1) and (part = 4)", 1); + test("test_range_parts", "(part = 2) and (part <> id)", 1); + test("test_range_parts", "(part = 2) or (part <> id)", 4); } @Test diff --git a/fs_brokers/apache_hdfs_broker/pom.xml b/fs_brokers/apache_hdfs_broker/pom.xml index bbd58e5d5d4240..2cb8d892dee7f6 100644 --- a/fs_brokers/apache_hdfs_broker/pom.xml +++ b/fs_brokers/apache_hdfs_broker/pom.xml @@ -69,9 +69,10 @@ under the License. 1.8 2.18.0 github - 2.10.2 + 3.3.6 4.1.65.Final hadoop2-2.2.15 + 1.0.1 @@ -224,6 +225,29 @@ under the License. 
+ + + org.apache.hadoop + hadoop-mapreduce-client-core + ${hadoop.version} + + + javax.servlet + servlet-api + + + io.netty + netty + + + + + + org.apache.doris + hive-catalog-shade + ${doris.hive.catalog.shade.version} + + com.fasterxml.jackson.core jackson-databind diff --git a/fs_brokers/apache_hdfs_broker/src/main/java/org/apache/doris/broker/hdfs/FileSystemManager.java b/fs_brokers/apache_hdfs_broker/src/main/java/org/apache/doris/broker/hdfs/FileSystemManager.java index 55ba457404a01e..22be82e34c2edc 100644 --- a/fs_brokers/apache_hdfs_broker/src/main/java/org/apache/doris/broker/hdfs/FileSystemManager.java +++ b/fs_brokers/apache_hdfs_broker/src/main/java/org/apache/doris/broker/hdfs/FileSystemManager.java @@ -25,17 +25,19 @@ import com.google.common.base.Strings; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; import org.apache.log4j.Logger; -import org.apache.hadoop.fs.CommonConfigurationKeys; import java.io.File; import java.io.FileNotFoundException; @@ -1061,6 +1063,49 @@ public BrokerFileSystem getGooseFSFileSystem(String path, Map pr } } + public List listLocatedFiles(String path, boolean onlyFiles, + boolean recursive, Map properties) { + List resultFileStatus = null; + BrokerFileSystem fileSystem = getFileSystem(path, properties); + Path locatedPath = new Path(path); + try { + FileSystem innerFileSystem = fileSystem.getDFSFileSystem(); + RemoteIterator locatedFiles = onlyFiles ? 
innerFileSystem.listFiles(locatedPath, recursive) + : innerFileSystem.listLocatedStatus(locatedPath); + return getFileLocations(locatedFiles); + } catch (FileNotFoundException e) { + logger.info("file not found: " + e.getMessage()); + throw new BrokerException(TBrokerOperationStatusCode.FILE_NOT_FOUND, + e, "file not found"); + } catch (Exception e) { + logger.error("errors while get file status ", e); + fileSystem.closeFileSystem(); + throw new BrokerException(TBrokerOperationStatusCode.TARGET_STORAGE_SERVICE_ERROR, + e, "unknown error when listLocatedFiles"); + } + } + + private List getFileLocations(RemoteIterator locatedFiles) throws IOException { + List locations = new ArrayList<>(); + while (locatedFiles.hasNext()) { + LocatedFileStatus fileStatus = locatedFiles.next(); + TBrokerFileStatus brokerFileStatus = new TBrokerFileStatus(); + brokerFileStatus.setPath(fileStatus.getPath().toString()); + brokerFileStatus.setIsDir(fileStatus.isDirectory()); + if (fileStatus.isDirectory()) { + brokerFileStatus.setIsSplitable(false); + brokerFileStatus.setSize(-1); + } else { + brokerFileStatus.setSize(fileStatus.getLen()); + brokerFileStatus.setIsSplitable(true); + } + brokerFileStatus.setModificationTime(fileStatus.getModificationTime()); + brokerFileStatus.setBlockSize(fileStatus.getBlockSize()); + locations.add(brokerFileStatus); + } + return locations; + } + public List listPath(String path, boolean fileNameOnly, Map properties) { List resultFileStatus = null; WildcardURI pathUri = new WildcardURI(path); @@ -1282,13 +1327,7 @@ public void pwrite(TBrokerFD fd, long offset, byte[] data) { FSDataOutputStream fsDataOutputStream = clientContextManager.getFsDataOutputStream(fd); synchronized (fsDataOutputStream) { long currentStreamOffset; - try { - currentStreamOffset = fsDataOutputStream.getPos(); - } catch (IOException e) { - logger.error("errors while get file pos from output stream", e); - throw new BrokerException(TBrokerOperationStatusCode.TARGET_STORAGE_SERVICE_ERROR, - "errors while get file pos from output stream"); - } + currentStreamOffset = fsDataOutputStream.getPos(); if (currentStreamOffset != offset) { // it's ok, it means that last pwrite succeed finally if (currentStreamOffset == offset + data.length) { diff --git a/fs_brokers/apache_hdfs_broker/src/main/java/org/apache/doris/broker/hdfs/HDFSBrokerServiceImpl.java b/fs_brokers/apache_hdfs_broker/src/main/java/org/apache/doris/broker/hdfs/HDFSBrokerServiceImpl.java index 14ff74dd41e62e..816462ecb340e7 100644 --- a/fs_brokers/apache_hdfs_broker/src/main/java/org/apache/doris/broker/hdfs/HDFSBrokerServiceImpl.java +++ b/fs_brokers/apache_hdfs_broker/src/main/java/org/apache/doris/broker/hdfs/HDFSBrokerServiceImpl.java @@ -19,6 +19,7 @@ import com.google.common.base.Stopwatch; import org.apache.doris.common.BrokerPerfMonitor; +import org.apache.doris.common.HiveUtils; import org.apache.doris.thrift.TBrokerCheckPathExistRequest; import org.apache.doris.thrift.TBrokerCheckPathExistResponse; import org.apache.doris.thrift.TBrokerCloseReaderRequest; @@ -28,6 +29,8 @@ import org.apache.doris.thrift.TBrokerFileSizeRequest; import org.apache.doris.thrift.TBrokerFileSizeResponse; import org.apache.doris.thrift.TBrokerFileStatus; +import org.apache.doris.thrift.TBrokerIsSplittableResponse; +import org.apache.doris.thrift.TBrokerIsSplittableRequest; import org.apache.doris.thrift.TBrokerListPathRequest; import org.apache.doris.thrift.TBrokerListResponse; import org.apache.doris.thrift.TBrokerOpenReaderRequest; @@ -86,6 +89,47 @@ public 
TBrokerListResponse listPath(TBrokerListPathRequest request) } } + @Override + public TBrokerListResponse listLocatedFiles(TBrokerListPathRequest request) + throws TException { + logger.info("received a listLocatedFiles request, request detail: " + request); + TBrokerListResponse response = new TBrokerListResponse(); + try { + boolean recursive = request.isIsRecursive(); + boolean onlyFiles = false; + if (request.isSetOnlyFiles()) { + onlyFiles = request.isOnlyFiles(); + } + List fileStatuses = fileSystemManager.listLocatedFiles(request.path, + onlyFiles, recursive, request.properties); + response.setOpStatus(generateOKStatus()); + response.setFiles(fileStatuses); + return response; + } catch (BrokerException e) { + logger.warn("failed to list path: " + request.path, e); + TBrokerOperationStatus errorStatus = e.generateFailedOperationStatus(); + response.setOpStatus(errorStatus); + return response; + } + } + + @Override + public TBrokerIsSplittableResponse isSplittable(TBrokerIsSplittableRequest request) throws TException { + logger.info("received a isSplittable request, request detail: " + request); + TBrokerIsSplittableResponse response = new TBrokerIsSplittableResponse(); + try { + boolean isSplittable = HiveUtils.isSplittable(request.path, request.inputFormat, request.properties); + response.setOpStatus(generateOKStatus()); + response.setSplittable(isSplittable); + return response; + } catch (BrokerException e) { + logger.warn("failed to get isSplitable with path: " + request.path, e); + TBrokerOperationStatus errorStatus = e.generateFailedOperationStatus(); + response.setOpStatus(errorStatus); + return response; + } + } + @Override public TBrokerOperationStatus deletePath(TBrokerDeletePathRequest request) throws TException { diff --git a/fs_brokers/apache_hdfs_broker/src/main/java/org/apache/doris/common/HiveUtils.java b/fs_brokers/apache_hdfs_broker/src/main/java/org/apache/doris/common/HiveUtils.java new file mode 100644 index 00000000000000..f2211eb202662a --- /dev/null +++ b/fs_brokers/apache_hdfs_broker/src/main/java/org/apache/doris/common/HiveUtils.java @@ -0,0 +1,112 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.doris.common; + +import org.apache.doris.broker.hdfs.BrokerException; +import org.apache.doris.thrift.TBrokerOperationStatusCode; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.io.SymlinkTextInputFormat; +import org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat; +import org.apache.hadoop.mapred.InputFormat; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.TextInputFormat; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.log4j.Logger; + +import java.io.IOException; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.Map; + +public class HiveUtils { + private static final Logger logger = Logger.getLogger(HiveUtils.class.getName()); + + public static boolean isSplittable(String path, String inputFormatName, + Map properties) throws BrokerException { + JobConf jobConf = getJobConf(properties); + InputFormat inputFormat = getInputFormat(jobConf, inputFormatName); + return isSplittableInternal(inputFormat, new Path(path), jobConf); + } + + private static JobConf getJobConf(Map properties) { + Configuration configuration = new Configuration(); + for (Map.Entry entry : properties.entrySet()) { + configuration.set(entry.getKey(), entry.getValue()); + } + return new JobConf(configuration); + } + + private static InputFormat getInputFormat(JobConf jobConf, String inputFormatName) throws BrokerException { + try { + Class> inputFormatClass = getInputFormatClass(jobConf, inputFormatName); + if (inputFormatClass == SymlinkTextInputFormat.class) { + // symlink targets are always TextInputFormat + inputFormatClass = TextInputFormat.class; + } + + return ReflectionUtils.newInstance(inputFormatClass, jobConf); + } catch (ClassNotFoundException | RuntimeException e) { + throw new BrokerException(TBrokerOperationStatusCode.INVALID_ARGUMENT, + "Unable to create input format " + inputFormatName, e); + } + } + + @SuppressWarnings({"unchecked", "RedundantCast"}) + private static Class> getInputFormatClass(JobConf conf, String inputFormatName) + throws ClassNotFoundException { + // CDH uses different names for Parquet + if ("parquet.hive.DeprecatedParquetInputFormat".equals(inputFormatName) + || "parquet.hive.MapredParquetInputFormat".equals(inputFormatName)) { + return MapredParquetInputFormat.class; + } + + Class clazz = conf.getClassByName(inputFormatName); + return (Class>) clazz.asSubclass(InputFormat.class); + } + + private static boolean isSplittableInternal(InputFormat inputFormat, Path path, JobConf jobConf) { + // ORC uses a custom InputFormat but is always splittable + if (inputFormat.getClass().getSimpleName().equals("OrcInputFormat")) { + return true; + } + + // use reflection to get isSplittable method on FileInputFormat + Method method = null; + for (Class clazz = inputFormat.getClass(); clazz != null; clazz = clazz.getSuperclass()) { + try { + method = clazz.getDeclaredMethod("isSplitable", FileSystem.class, Path.class); + break; + } catch (NoSuchMethodException ignored) { + logger.warn(LoggerMessageFormat.format("Class {} doesn't contain isSplitable method", clazz)); + } + } + + if (method == null) { + return false; + } + try { + method.setAccessible(true); + return (boolean) method.invoke(inputFormat, path.getFileSystem(jobConf), path); + } catch (InvocationTargetException | IllegalAccessException | IOException e) { + throw new RuntimeException(e); + } + } +} 
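// Editor's usage sketch for the new HiveUtils.isSplittable helper above. The paths
// and expected outputs are assumptions, reasoned from TextInputFormat, whose
// isSplitable() only checks the file suffix against the configured compression
// codecs; it assumes hadoop-mapreduce-client-core is on the classpath, as added to
// the broker pom in this patch.
import java.util.Collections;

class HiveUtilsSplittableDemo {
    public static void main(String[] args) throws Exception {
        String textFormat = "org.apache.hadoop.mapred.TextInputFormat";
        // plain text file: no codec matches the suffix -> splittable (true)
        System.out.println(org.apache.doris.common.HiveUtils.isSplittable(
                "file:///tmp/warehouse/t/part-00000", textFormat, Collections.emptyMap()));
        // gzip file: GzipCodec is not a SplittableCompressionCodec -> false
        System.out.println(org.apache.doris.common.HiveUtils.isSplittable(
                "file:///tmp/warehouse/t/part-00000.gz", textFormat, Collections.emptyMap()));
    }
}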
diff --git a/gensrc/thrift/FrontendService.thrift b/gensrc/thrift/FrontendService.thrift index f2769add6998f5..3d881099de7d0e 100644 --- a/gensrc/thrift/FrontendService.thrift +++ b/gensrc/thrift/FrontendService.thrift @@ -1015,6 +1015,7 @@ struct TGetBinlogResult { 3: optional list binlogs 4: optional string fe_version 5: optional i64 fe_meta_version + 6: optional Types.TNetworkAddress master_address } struct TGetTabletReplicaInfosRequest { @@ -1093,6 +1094,7 @@ typedef TGetBinlogRequest TGetBinlogLagRequest struct TGetBinlogLagResult { 1: optional Status.TStatus status 2: optional i64 lag + 3: optional Types.TNetworkAddress master_address } struct TUpdateFollowerStatsCacheRequest { @@ -1191,6 +1193,7 @@ struct TGetMetaDBMeta { struct TGetMetaResult { 1: required Status.TStatus status 2: optional TGetMetaDBMeta db_meta + 3: optional Types.TNetworkAddress master_address } struct TGetBackendMetaRequest { @@ -1205,6 +1208,7 @@ struct TGetBackendMetaRequest { struct TGetBackendMetaResult { 1: required Status.TStatus status 2: optional list backends + 3: optional Types.TNetworkAddress master_address } service FrontendService { diff --git a/gensrc/thrift/PaloBrokerService.thrift b/gensrc/thrift/PaloBrokerService.thrift index 308c606544e939..e4bc60a201fa3e 100644 --- a/gensrc/thrift/PaloBrokerService.thrift +++ b/gensrc/thrift/PaloBrokerService.thrift @@ -91,12 +91,25 @@ struct TBrokerCheckPathExistResponse { 2: required bool isPathExist; } +struct TBrokerIsSplittableResponse { + 1: optional TBrokerOperationStatus opStatus; + 2: optional bool splittable; +} + struct TBrokerListPathRequest { 1: required TBrokerVersion version; 2: required string path; 3: required bool isRecursive; 4: required map properties; 5: optional bool fileNameOnly; + 6: optional bool onlyFiles; +} + +struct TBrokerIsSplittableRequest { + 1: optional TBrokerVersion version; + 2: optional string path; + 3: optional string inputFormat; + 4: optional map properties; } struct TBrokerDeletePathRequest { @@ -184,6 +197,13 @@ service TPaloBrokerService { // return a list of files under a path TBrokerListResponse listPath(1: TBrokerListPathRequest request); + + // return located files of a given path. A broker implementation refers to + // 'org.apache.doris.fs.remote.RemoteFileSystem#listLocatedFiles' in fe-core. + TBrokerListResponse listLocatedFiles(1: TBrokerListPathRequest request); + + // return whether the path with specified input format is splittable. + TBrokerIsSplittableResponse isSplittable(1: TBrokerIsSplittableRequest request); // delete a file, if the deletion of the file fails, the status code will return an error message // input: diff --git a/regression-test/data/correctness_p0/test_distinct_agg.out b/regression-test/data/correctness_p0/test_distinct_agg.out index b70da182eeface..d75f85b923cfd5 100644 --- a/regression-test/data/correctness_p0/test_distinct_agg.out +++ b/regression-test/data/correctness_p0/test_distinct_agg.out @@ -1,4 +1,15 @@ -- This file is automatically generated. 
You should know what you did if you want to edit this +-- !distinct_1 -- + -- !select1 -- 本日 +-- !multi_distinct_1 -- +2 2 + +-- !multi_distinct_2 -- +369 1145 + +-- !multi_distinct_3 -- +184.5 572.5 + diff --git a/regression-test/data/correctness_p0/test_inlineview_with_window_function.out b/regression-test/data/correctness_p0/test_inlineview_with_window_function.out index 281bf5dd1b2e76..651e03f982f96d 100644 --- a/regression-test/data/correctness_p0/test_inlineview_with_window_function.out +++ b/regression-test/data/correctness_p0/test_inlineview_with_window_function.out @@ -2,3 +2,8 @@ -- !order -- 2023-06-10 cib2205045_1_1s 0.0000 168939.0 0.0000 0.0 0.0000 0.0 0.0000 day +-- !order2 -- +1 1 1 +2 2 2 +3 3 3 +4 4 4 diff --git a/regression-test/data/export_p0/export/test_show_export.out b/regression-test/data/export_p0/export/test_show_export.out deleted file mode 100644 index 8432bc86e70d99..00000000000000 --- a/regression-test/data/export_p0/export/test_show_export.out +++ /dev/null @@ -1,257 +0,0 @@ --- This file is automatically generated. You should know what you did if you want to edit this --- !select_export1 -- -1 2017-10-01 2017-10-01T00:00 Beijing 1 1 true 1 1 1 1.1 1.1 char1 1 -10 2017-10-01 2017-10-01T00:00 Beijing 10 10 true 10 10 10 10.1 10.1 char10 10 -100 2017-10-01 2017-10-01T00:00 \N \N \N \N \N \N \N \N \N \N \N -11 2017-10-01 2017-10-01T00:00 Beijing 11 11 true 11 11 11 11.11 11.11 char11 11 -12 2017-10-01 2017-10-01T00:00 Beijing 12 12 true 12 12 12 12.12 12.12 char12 12 -13 2017-10-01 2017-10-01T00:00 Beijing 13 13 true 13 13 13 13.13 13.13 char13 13 -14 2017-10-01 2017-10-01T00:00 Beijing 14 14 true 14 14 14 14.14 14.14 char14 14 -15 2017-10-01 2017-10-01T00:00 Beijing 15 15 true 15 15 15 15.15 15.15 char15 15 -16 2017-10-01 2017-10-01T00:00 Beijing 16 16 true 16 16 16 16.16 16.16 char16 16 -17 2017-10-01 2017-10-01T00:00 Beijing 17 17 true 17 17 17 17.17 17.17 char17 17 -18 2017-10-01 2017-10-01T00:00 Beijing 18 18 true 18 18 18 18.18 18.18 char18 18 -19 2017-10-01 2017-10-01T00:00 Beijing 19 19 true 19 19 19 19.19 19.19 char19 19 -2 2017-10-01 2017-10-01T00:00 Beijing 2 2 true 2 2 2 2.2 2.2 char2 2 -20 2017-10-01 2017-10-01T00:00 Beijing 20 20 true 20 20 20 20.2 20.2 char20 20 -21 2017-10-01 2017-10-01T00:00 Beijing 21 21 true 21 21 21 21.21 21.21 char21 21 -22 2017-10-01 2017-10-01T00:00 Beijing 22 22 true 22 22 22 22.22 22.22 char22 22 -23 2017-10-01 2017-10-01T00:00 Beijing 23 23 true 23 23 23 23.23 23.23 char23 23 -24 2017-10-01 2017-10-01T00:00 Beijing 24 24 true 24 24 24 24.24 24.24 char24 24 -25 2017-10-01 2017-10-01T00:00 Beijing 25 25 true 25 25 25 25.25 25.25 char25 25 -26 2017-10-01 2017-10-01T00:00 Beijing 26 26 true 26 26 26 26.26 26.26 char26 26 -27 2017-10-01 2017-10-01T00:00 Beijing 27 27 true 27 27 27 27.27 27.27 char27 27 -28 2017-10-01 2017-10-01T00:00 Beijing 28 28 true 28 28 28 28.28 28.28 char28 28 -29 2017-10-01 2017-10-01T00:00 Beijing 29 29 true 29 29 29 29.29 29.29 char29 29 -3 2017-10-01 2017-10-01T00:00 Beijing 3 3 true 3 3 3 3.3 3.3 char3 3 -30 2017-10-01 2017-10-01T00:00 Beijing 30 30 true 30 30 30 30.3 30.3 char30 30 -31 2017-10-01 2017-10-01T00:00 Beijing 31 31 true 31 31 31 31.31 31.31 char31 31 -32 2017-10-01 2017-10-01T00:00 Beijing 32 32 true 32 32 32 32.32 32.32 char32 32 -33 2017-10-01 2017-10-01T00:00 Beijing 33 33 true 33 33 33 33.33 33.33 char33 33 -34 2017-10-01 2017-10-01T00:00 Beijing 34 34 true 34 34 34 34.34 34.34 char34 34 -35 2017-10-01 2017-10-01T00:00 Beijing 35 35 true 35 35 35 35.35 35.35 char35 35 -36 
2017-10-01 2017-10-01T00:00 Beijing 36 36 true 36 36 36 36.36 36.36 char36 36 -37 2017-10-01 2017-10-01T00:00 Beijing 37 37 true 37 37 37 37.37 37.37 char37 37 -38 2017-10-01 2017-10-01T00:00 Beijing 38 38 true 38 38 38 38.38 38.38 char38 38 -39 2017-10-01 2017-10-01T00:00 Beijing 39 39 true 39 39 39 39.39 39.39 char39 39 -4 2017-10-01 2017-10-01T00:00 Beijing 4 4 true 4 4 4 4.4 4.4 char4 4 -40 2017-10-01 2017-10-01T00:00 Beijing 40 40 true 40 40 40 40.4 40.4 char40 40 -41 2017-10-01 2017-10-01T00:00 Beijing 41 41 true 41 41 41 41.41 41.41 char41 41 -42 2017-10-01 2017-10-01T00:00 Beijing 42 42 true 42 42 42 42.42 42.42 char42 42 -43 2017-10-01 2017-10-01T00:00 Beijing 43 43 true 43 43 43 43.43 43.43 char43 43 -44 2017-10-01 2017-10-01T00:00 Beijing 44 44 true 44 44 44 44.44 44.44 char44 44 -45 2017-10-01 2017-10-01T00:00 Beijing 45 45 true 45 45 45 45.45 45.45 char45 45 -46 2017-10-01 2017-10-01T00:00 Beijing 46 46 true 46 46 46 46.46 46.46 char46 46 -47 2017-10-01 2017-10-01T00:00 Beijing 47 47 true 47 47 47 47.47 47.47 char47 47 -48 2017-10-01 2017-10-01T00:00 Beijing 48 48 true 48 48 48 48.48 48.48 char48 48 -49 2017-10-01 2017-10-01T00:00 Beijing 49 49 true 49 49 49 49.49 49.49 char49 49 -5 2017-10-01 2017-10-01T00:00 Beijing 5 5 true 5 5 5 5.5 5.5 char5 5 -50 2017-10-01 2017-10-01T00:00 Beijing 50 50 true 50 50 50 50.5 50.5 char50 50 -51 2017-10-01 2017-10-01T00:00 Beijing 51 51 true 51 51 51 51.51 51.51 char51 51 -52 2017-10-01 2017-10-01T00:00 Beijing 52 52 true 52 52 52 52.52 52.52 char52 52 -53 2017-10-01 2017-10-01T00:00 Beijing 53 53 true 53 53 53 53.53 53.53 char53 53 -54 2017-10-01 2017-10-01T00:00 Beijing 54 54 true 54 54 54 54.54 54.54 char54 54 -55 2017-10-01 2017-10-01T00:00 Beijing 55 55 true 55 55 55 55.55 55.55 char55 55 -56 2017-10-01 2017-10-01T00:00 Beijing 56 56 true 56 56 56 56.56 56.56 char56 56 -57 2017-10-01 2017-10-01T00:00 Beijing 57 57 true 57 57 57 57.57 57.57 char57 57 -58 2017-10-01 2017-10-01T00:00 Beijing 58 58 true 58 58 58 58.58 58.58 char58 58 -59 2017-10-01 2017-10-01T00:00 Beijing 59 59 true 59 59 59 59.59 59.59 char59 59 -6 2017-10-01 2017-10-01T00:00 Beijing 6 6 true 6 6 6 6.6 6.6 char6 6 -60 2017-10-01 2017-10-01T00:00 Beijing 60 60 true 60 60 60 60.6 60.6 char60 60 -61 2017-10-01 2017-10-01T00:00 Beijing 61 61 true 61 61 61 61.61 61.61 char61 61 -62 2017-10-01 2017-10-01T00:00 Beijing 62 62 true 62 62 62 62.62 62.62 char62 62 -63 2017-10-01 2017-10-01T00:00 Beijing 63 63 true 63 63 63 63.63 63.63 char63 63 -64 2017-10-01 2017-10-01T00:00 Beijing 64 64 true 64 64 64 64.64 64.64 char64 64 -65 2017-10-01 2017-10-01T00:00 Beijing 65 65 true 65 65 65 65.65 65.65 char65 65 -66 2017-10-01 2017-10-01T00:00 Beijing 66 66 true 66 66 66 66.66 66.66 char66 66 -67 2017-10-01 2017-10-01T00:00 Beijing 67 67 true 67 67 67 67.67 67.67 char67 67 -68 2017-10-01 2017-10-01T00:00 Beijing 68 68 true 68 68 68 68.68 68.68 char68 68 -69 2017-10-01 2017-10-01T00:00 Beijing 69 69 true 69 69 69 69.69 69.69 char69 69 -7 2017-10-01 2017-10-01T00:00 Beijing 7 7 true 7 7 7 7.7 7.7 char7 7 -70 2017-10-01 2017-10-01T00:00 Beijing 70 70 true 70 70 70 70.7 70.7 char70 70 -71 2017-10-01 2017-10-01T00:00 Beijing 71 71 true 71 71 71 71.71 71.71 char71 71 -72 2017-10-01 2017-10-01T00:00 Beijing 72 72 true 72 72 72 72.72 72.72 char72 72 -73 2017-10-01 2017-10-01T00:00 Beijing 73 73 true 73 73 73 73.73 73.73 char73 73 -74 2017-10-01 2017-10-01T00:00 Beijing 74 74 true 74 74 74 74.74 74.74 char74 74 -75 2017-10-01 2017-10-01T00:00 Beijing 75 75 true 75 75 75 75.75 75.75 char75 75 -76 
2017-10-01 2017-10-01T00:00 Beijing 76 76 true 76 76 76 76.76 76.76 char76 76 -77 2017-10-01 2017-10-01T00:00 Beijing 77 77 true 77 77 77 77.77 77.77 char77 77 -78 2017-10-01 2017-10-01T00:00 Beijing 78 78 true 78 78 78 78.78 78.78 char78 78 -79 2017-10-01 2017-10-01T00:00 Beijing 79 79 true 79 79 79 79.79 79.79 char79 79 -8 2017-10-01 2017-10-01T00:00 Beijing 8 8 true 8 8 8 8.8 8.8 char8 8 -80 2017-10-01 2017-10-01T00:00 Beijing 80 80 true 80 80 80 80.8 80.8 char80 80 -81 2017-10-01 2017-10-01T00:00 Beijing 81 81 true 81 81 81 81.81 81.81 char81 81 -82 2017-10-01 2017-10-01T00:00 Beijing 82 82 true 82 82 82 82.82 82.82 char82 82 -83 2017-10-01 2017-10-01T00:00 Beijing 83 83 true 83 83 83 83.83 83.83 char83 83 -84 2017-10-01 2017-10-01T00:00 Beijing 84 84 true 84 84 84 84.84 84.84 char84 84 -85 2017-10-01 2017-10-01T00:00 Beijing 85 85 true 85 85 85 85.85 85.85 char85 85 -86 2017-10-01 2017-10-01T00:00 Beijing 86 86 true 86 86 86 86.86 86.86 char86 86 -87 2017-10-01 2017-10-01T00:00 Beijing 87 87 true 87 87 87 87.87 87.87 char87 87 -88 2017-10-01 2017-10-01T00:00 Beijing 88 88 true 88 88 88 88.88 88.88 char88 88 -89 2017-10-01 2017-10-01T00:00 Beijing 89 89 true 89 89 89 89.89 89.89 char89 89 -9 2017-10-01 2017-10-01T00:00 Beijing 9 9 true 9 9 9 9.9 9.9 char9 9 -90 2017-10-01 2017-10-01T00:00 Beijing 90 90 true 90 90 90 90.9 90.9 char90 90 -91 2017-10-01 2017-10-01T00:00 Beijing 91 91 true 91 91 91 91.91 91.91 char91 91 -92 2017-10-01 2017-10-01T00:00 Beijing 92 92 true 92 92 92 92.92 92.92 char92 92 -93 2017-10-01 2017-10-01T00:00 Beijing 93 93 true 93 93 93 93.93 93.93 char93 93 -94 2017-10-01 2017-10-01T00:00 Beijing 94 94 true 94 94 94 94.94 94.94 char94 94 -95 2017-10-01 2017-10-01T00:00 Beijing 95 95 true 95 95 95 95.95 95.95 char95 95 -96 2017-10-01 2017-10-01T00:00 Beijing 96 96 true 96 96 96 96.96 96.96 char96 96 -97 2017-10-01 2017-10-01T00:00 Beijing 97 97 true 97 97 97 97.97 97.97 char97 97 -98 2017-10-01 2017-10-01T00:00 Beijing 98 98 true 98 98 98 98.98 98.98 char98 98 -99 2017-10-01 2017-10-01T00:00 Beijing 99 99 true 99 99 99 99.99 99.99 char99 99 - --- !select_load1 -- -1 2017-10-01 2017-10-01 00:00:00 Beijing 1 1 true 1 1 1.1 1.1 char1 1 1 -10 2017-10-01 2017-10-01 00:00:00 Beijing 10 10 true 10 10 10.1 10.1 char10 10 10 -100 2017-10-01 2017-10-01 00:00:00 \N \N \N \N \N \N \N \N \N \N \N -11 2017-10-01 2017-10-01 00:00:00 Beijing 11 11 true 11 11 11.11 11.11 char11 11 11 -12 2017-10-01 2017-10-01 00:00:00 Beijing 12 12 true 12 12 12.12 12.12 char12 12 12 -13 2017-10-01 2017-10-01 00:00:00 Beijing 13 13 true 13 13 13.13 13.13 char13 13 13 -14 2017-10-01 2017-10-01 00:00:00 Beijing 14 14 true 14 14 14.14 14.14 char14 14 14 -15 2017-10-01 2017-10-01 00:00:00 Beijing 15 15 true 15 15 15.15 15.15 char15 15 15 -16 2017-10-01 2017-10-01 00:00:00 Beijing 16 16 true 16 16 16.16 16.16 char16 16 16 -17 2017-10-01 2017-10-01 00:00:00 Beijing 17 17 true 17 17 17.17 17.17 char17 17 17 -18 2017-10-01 2017-10-01 00:00:00 Beijing 18 18 true 18 18 18.18 18.18 char18 18 18 -19 2017-10-01 2017-10-01 00:00:00 Beijing 19 19 true 19 19 19.19 19.19 char19 19 19 -2 2017-10-01 2017-10-01 00:00:00 Beijing 2 2 true 2 2 2.2 2.2 char2 2 2 -20 2017-10-01 2017-10-01 00:00:00 Beijing 20 20 true 20 20 20.2 20.2 char20 20 20 -21 2017-10-01 2017-10-01 00:00:00 Beijing 21 21 true 21 21 21.21 21.21 char21 21 21 -22 2017-10-01 2017-10-01 00:00:00 Beijing 22 22 true 22 22 22.22 22.22 char22 22 22 -23 2017-10-01 2017-10-01 00:00:00 Beijing 23 23 true 23 23 23.23 23.23 char23 23 23 -24 2017-10-01 2017-10-01 
00:00:00 Beijing 24 24 true 24 24 24.24 24.24 char24 24 24 -25 2017-10-01 2017-10-01 00:00:00 Beijing 25 25 true 25 25 25.25 25.25 char25 25 25 -26 2017-10-01 2017-10-01 00:00:00 Beijing 26 26 true 26 26 26.26 26.26 char26 26 26 -27 2017-10-01 2017-10-01 00:00:00 Beijing 27 27 true 27 27 27.27 27.27 char27 27 27 -28 2017-10-01 2017-10-01 00:00:00 Beijing 28 28 true 28 28 28.28 28.28 char28 28 28 -29 2017-10-01 2017-10-01 00:00:00 Beijing 29 29 true 29 29 29.29 29.29 char29 29 29 -3 2017-10-01 2017-10-01 00:00:00 Beijing 3 3 true 3 3 3.3 3.3 char3 3 3 -30 2017-10-01 2017-10-01 00:00:00 Beijing 30 30 true 30 30 30.3 30.3 char30 30 30 -31 2017-10-01 2017-10-01 00:00:00 Beijing 31 31 true 31 31 31.31 31.31 char31 31 31 -32 2017-10-01 2017-10-01 00:00:00 Beijing 32 32 true 32 32 32.32 32.32 char32 32 32 -33 2017-10-01 2017-10-01 00:00:00 Beijing 33 33 true 33 33 33.33 33.33 char33 33 33 -34 2017-10-01 2017-10-01 00:00:00 Beijing 34 34 true 34 34 34.34 34.34 char34 34 34 -35 2017-10-01 2017-10-01 00:00:00 Beijing 35 35 true 35 35 35.35 35.35 char35 35 35 -36 2017-10-01 2017-10-01 00:00:00 Beijing 36 36 true 36 36 36.36 36.36 char36 36 36 -37 2017-10-01 2017-10-01 00:00:00 Beijing 37 37 true 37 37 37.37 37.37 char37 37 37 -38 2017-10-01 2017-10-01 00:00:00 Beijing 38 38 true 38 38 38.38 38.38 char38 38 38 -39 2017-10-01 2017-10-01 00:00:00 Beijing 39 39 true 39 39 39.39 39.39 char39 39 39 -4 2017-10-01 2017-10-01 00:00:00 Beijing 4 4 true 4 4 4.4 4.4 char4 4 4 -40 2017-10-01 2017-10-01 00:00:00 Beijing 40 40 true 40 40 40.4 40.4 char40 40 40 -41 2017-10-01 2017-10-01 00:00:00 Beijing 41 41 true 41 41 41.41 41.41 char41 41 41 -42 2017-10-01 2017-10-01 00:00:00 Beijing 42 42 true 42 42 42.42 42.42 char42 42 42 -43 2017-10-01 2017-10-01 00:00:00 Beijing 43 43 true 43 43 43.43 43.43 char43 43 43 -44 2017-10-01 2017-10-01 00:00:00 Beijing 44 44 true 44 44 44.44 44.44 char44 44 44 -45 2017-10-01 2017-10-01 00:00:00 Beijing 45 45 true 45 45 45.45 45.45 char45 45 45 -46 2017-10-01 2017-10-01 00:00:00 Beijing 46 46 true 46 46 46.46 46.46 char46 46 46 -47 2017-10-01 2017-10-01 00:00:00 Beijing 47 47 true 47 47 47.47 47.47 char47 47 47 -48 2017-10-01 2017-10-01 00:00:00 Beijing 48 48 true 48 48 48.48 48.48 char48 48 48 -49 2017-10-01 2017-10-01 00:00:00 Beijing 49 49 true 49 49 49.49 49.49 char49 49 49 -5 2017-10-01 2017-10-01 00:00:00 Beijing 5 5 true 5 5 5.5 5.5 char5 5 5 -50 2017-10-01 2017-10-01 00:00:00 Beijing 50 50 true 50 50 50.5 50.5 char50 50 50 -51 2017-10-01 2017-10-01 00:00:00 Beijing 51 51 true 51 51 51.51 51.51 char51 51 51 -52 2017-10-01 2017-10-01 00:00:00 Beijing 52 52 true 52 52 52.52 52.52 char52 52 52 -53 2017-10-01 2017-10-01 00:00:00 Beijing 53 53 true 53 53 53.53 53.53 char53 53 53 -54 2017-10-01 2017-10-01 00:00:00 Beijing 54 54 true 54 54 54.54 54.54 char54 54 54 -55 2017-10-01 2017-10-01 00:00:00 Beijing 55 55 true 55 55 55.55 55.55 char55 55 55 -56 2017-10-01 2017-10-01 00:00:00 Beijing 56 56 true 56 56 56.56 56.56 char56 56 56 -57 2017-10-01 2017-10-01 00:00:00 Beijing 57 57 true 57 57 57.57 57.57 char57 57 57 -58 2017-10-01 2017-10-01 00:00:00 Beijing 58 58 true 58 58 58.58 58.58 char58 58 58 -59 2017-10-01 2017-10-01 00:00:00 Beijing 59 59 true 59 59 59.59 59.59 char59 59 59 -6 2017-10-01 2017-10-01 00:00:00 Beijing 6 6 true 6 6 6.6 6.6 char6 6 6 -60 2017-10-01 2017-10-01 00:00:00 Beijing 60 60 true 60 60 60.6 60.6 char60 60 60 -61 2017-10-01 2017-10-01 00:00:00 Beijing 61 61 true 61 61 61.61 61.61 char61 61 61 -62 2017-10-01 2017-10-01 00:00:00 Beijing 62 62 true 62 62 62.62 
62.62 char62 62 62 -63 2017-10-01 2017-10-01 00:00:00 Beijing 63 63 true 63 63 63.63 63.63 char63 63 63 -64 2017-10-01 2017-10-01 00:00:00 Beijing 64 64 true 64 64 64.64 64.64 char64 64 64 -65 2017-10-01 2017-10-01 00:00:00 Beijing 65 65 true 65 65 65.65 65.65 char65 65 65 -66 2017-10-01 2017-10-01 00:00:00 Beijing 66 66 true 66 66 66.66 66.66 char66 66 66 -67 2017-10-01 2017-10-01 00:00:00 Beijing 67 67 true 67 67 67.67 67.67 char67 67 67 -68 2017-10-01 2017-10-01 00:00:00 Beijing 68 68 true 68 68 68.68 68.68 char68 68 68 -69 2017-10-01 2017-10-01 00:00:00 Beijing 69 69 true 69 69 69.69 69.69 char69 69 69 -7 2017-10-01 2017-10-01 00:00:00 Beijing 7 7 true 7 7 7.7 7.7 char7 7 7 -70 2017-10-01 2017-10-01 00:00:00 Beijing 70 70 true 70 70 70.7 70.7 char70 70 70 -71 2017-10-01 2017-10-01 00:00:00 Beijing 71 71 true 71 71 71.71 71.71 char71 71 71 -72 2017-10-01 2017-10-01 00:00:00 Beijing 72 72 true 72 72 72.72 72.72 char72 72 72 -73 2017-10-01 2017-10-01 00:00:00 Beijing 73 73 true 73 73 73.73 73.73 char73 73 73 -74 2017-10-01 2017-10-01 00:00:00 Beijing 74 74 true 74 74 74.74 74.74 char74 74 74 -75 2017-10-01 2017-10-01 00:00:00 Beijing 75 75 true 75 75 75.75 75.75 char75 75 75 -76 2017-10-01 2017-10-01 00:00:00 Beijing 76 76 true 76 76 76.76 76.76 char76 76 76 -77 2017-10-01 2017-10-01 00:00:00 Beijing 77 77 true 77 77 77.77 77.77 char77 77 77 -78 2017-10-01 2017-10-01 00:00:00 Beijing 78 78 true 78 78 78.78 78.78 char78 78 78 -79 2017-10-01 2017-10-01 00:00:00 Beijing 79 79 true 79 79 79.79 79.79 char79 79 79 -8 2017-10-01 2017-10-01 00:00:00 Beijing 8 8 true 8 8 8.8 8.8 char8 8 8 -80 2017-10-01 2017-10-01 00:00:00 Beijing 80 80 true 80 80 80.8 80.8 char80 80 80 -81 2017-10-01 2017-10-01 00:00:00 Beijing 81 81 true 81 81 81.81 81.81 char81 81 81 -82 2017-10-01 2017-10-01 00:00:00 Beijing 82 82 true 82 82 82.82 82.82 char82 82 82 -83 2017-10-01 2017-10-01 00:00:00 Beijing 83 83 true 83 83 83.83 83.83 char83 83 83 -84 2017-10-01 2017-10-01 00:00:00 Beijing 84 84 true 84 84 84.84 84.84 char84 84 84 -85 2017-10-01 2017-10-01 00:00:00 Beijing 85 85 true 85 85 85.85 85.85 char85 85 85 -86 2017-10-01 2017-10-01 00:00:00 Beijing 86 86 true 86 86 86.86 86.86 char86 86 86 -87 2017-10-01 2017-10-01 00:00:00 Beijing 87 87 true 87 87 87.87 87.87 char87 87 87 -88 2017-10-01 2017-10-01 00:00:00 Beijing 88 88 true 88 88 88.88 88.88 char88 88 88 -89 2017-10-01 2017-10-01 00:00:00 Beijing 89 89 true 89 89 89.89 89.89 char89 89 89 -9 2017-10-01 2017-10-01 00:00:00 Beijing 9 9 true 9 9 9.9 9.9 char9 9 9 -90 2017-10-01 2017-10-01 00:00:00 Beijing 90 90 true 90 90 90.9 90.9 char90 90 90 -91 2017-10-01 2017-10-01 00:00:00 Beijing 91 91 true 91 91 91.91 91.91 char91 91 91 -92 2017-10-01 2017-10-01 00:00:00 Beijing 92 92 true 92 92 92.92 92.92 char92 92 92 -93 2017-10-01 2017-10-01 00:00:00 Beijing 93 93 true 93 93 93.93 93.93 char93 93 93 -94 2017-10-01 2017-10-01 00:00:00 Beijing 94 94 true 94 94 94.94 94.94 char94 94 94 -95 2017-10-01 2017-10-01 00:00:00 Beijing 95 95 true 95 95 95.95 95.95 char95 95 95 -96 2017-10-01 2017-10-01 00:00:00 Beijing 96 96 true 96 96 96.96 96.96 char96 96 96 -97 2017-10-01 2017-10-01 00:00:00 Beijing 97 97 true 97 97 97.97 97.97 char97 97 97 -98 2017-10-01 2017-10-01 00:00:00 Beijing 98 98 true 98 98 98.98 98.98 char98 98 98 -99 2017-10-01 2017-10-01 00:00:00 Beijing 99 99 true 99 99 99.99 99.99 char99 99 99 - --- !select_load1 -- -20 2017-10-01 2017-10-01 00:00:00 Beijing 20 20 true 20 20 20.2 20.2 char20 20 20 -21 2017-10-01 2017-10-01 00:00:00 Beijing 21 21 true 21 21 21.21 21.21 
char21 21 21 -22 2017-10-01 2017-10-01 00:00:00 Beijing 22 22 true 22 22 22.22 22.22 char22 22 22 -23 2017-10-01 2017-10-01 00:00:00 Beijing 23 23 true 23 23 23.23 23.23 char23 23 23 -24 2017-10-01 2017-10-01 00:00:00 Beijing 24 24 true 24 24 24.24 24.24 char24 24 24 -25 2017-10-01 2017-10-01 00:00:00 Beijing 25 25 true 25 25 25.25 25.25 char25 25 25 -26 2017-10-01 2017-10-01 00:00:00 Beijing 26 26 true 26 26 26.26 26.26 char26 26 26 -27 2017-10-01 2017-10-01 00:00:00 Beijing 27 27 true 27 27 27.27 27.27 char27 27 27 -28 2017-10-01 2017-10-01 00:00:00 Beijing 28 28 true 28 28 28.28 28.28 char28 28 28 -29 2017-10-01 2017-10-01 00:00:00 Beijing 29 29 true 29 29 29.29 29.29 char29 29 29 -30 2017-10-01 2017-10-01 00:00:00 Beijing 30 30 true 30 30 30.3 30.3 char30 30 30 -31 2017-10-01 2017-10-01 00:00:00 Beijing 31 31 true 31 31 31.31 31.31 char31 31 31 -32 2017-10-01 2017-10-01 00:00:00 Beijing 32 32 true 32 32 32.32 32.32 char32 32 32 -33 2017-10-01 2017-10-01 00:00:00 Beijing 33 33 true 33 33 33.33 33.33 char33 33 33 -34 2017-10-01 2017-10-01 00:00:00 Beijing 34 34 true 34 34 34.34 34.34 char34 34 34 -35 2017-10-01 2017-10-01 00:00:00 Beijing 35 35 true 35 35 35.35 35.35 char35 35 35 -36 2017-10-01 2017-10-01 00:00:00 Beijing 36 36 true 36 36 36.36 36.36 char36 36 36 -37 2017-10-01 2017-10-01 00:00:00 Beijing 37 37 true 37 37 37.37 37.37 char37 37 37 -38 2017-10-01 2017-10-01 00:00:00 Beijing 38 38 true 38 38 38.38 38.38 char38 38 38 -39 2017-10-01 2017-10-01 00:00:00 Beijing 39 39 true 39 39 39.39 39.39 char39 39 39 -40 2017-10-01 2017-10-01 00:00:00 Beijing 40 40 true 40 40 40.4 40.4 char40 40 40 -41 2017-10-01 2017-10-01 00:00:00 Beijing 41 41 true 41 41 41.41 41.41 char41 41 41 -42 2017-10-01 2017-10-01 00:00:00 Beijing 42 42 true 42 42 42.42 42.42 char42 42 42 -43 2017-10-01 2017-10-01 00:00:00 Beijing 43 43 true 43 43 43.43 43.43 char43 43 43 -44 2017-10-01 2017-10-01 00:00:00 Beijing 44 44 true 44 44 44.44 44.44 char44 44 44 -45 2017-10-01 2017-10-01 00:00:00 Beijing 45 45 true 45 45 45.45 45.45 char45 45 45 -46 2017-10-01 2017-10-01 00:00:00 Beijing 46 46 true 46 46 46.46 46.46 char46 46 46 -47 2017-10-01 2017-10-01 00:00:00 Beijing 47 47 true 47 47 47.47 47.47 char47 47 47 -48 2017-10-01 2017-10-01 00:00:00 Beijing 48 48 true 48 48 48.48 48.48 char48 48 48 -49 2017-10-01 2017-10-01 00:00:00 Beijing 49 49 true 49 49 49.49 49.49 char49 49 49 -50 2017-10-01 2017-10-01 00:00:00 Beijing 50 50 true 50 50 50.5 50.5 char50 50 50 -51 2017-10-01 2017-10-01 00:00:00 Beijing 51 51 true 51 51 51.51 51.51 char51 51 51 -52 2017-10-01 2017-10-01 00:00:00 Beijing 52 52 true 52 52 52.52 52.52 char52 52 52 -53 2017-10-01 2017-10-01 00:00:00 Beijing 53 53 true 53 53 53.53 53.53 char53 53 53 -54 2017-10-01 2017-10-01 00:00:00 Beijing 54 54 true 54 54 54.54 54.54 char54 54 54 -55 2017-10-01 2017-10-01 00:00:00 Beijing 55 55 true 55 55 55.55 55.55 char55 55 55 -56 2017-10-01 2017-10-01 00:00:00 Beijing 56 56 true 56 56 56.56 56.56 char56 56 56 -57 2017-10-01 2017-10-01 00:00:00 Beijing 57 57 true 57 57 57.57 57.57 char57 57 57 -58 2017-10-01 2017-10-01 00:00:00 Beijing 58 58 true 58 58 58.58 58.58 char58 58 58 -59 2017-10-01 2017-10-01 00:00:00 Beijing 59 59 true 59 59 59.59 59.59 char59 59 59 -60 2017-10-01 2017-10-01 00:00:00 Beijing 60 60 true 60 60 60.6 60.6 char60 60 60 -61 2017-10-01 2017-10-01 00:00:00 Beijing 61 61 true 61 61 61.61 61.61 char61 61 61 -62 2017-10-01 2017-10-01 00:00:00 Beijing 62 62 true 62 62 62.62 62.62 char62 62 62 -63 2017-10-01 2017-10-01 00:00:00 Beijing 63 63 true 63 63 
63.63 63.63 char63 63 63 -64 2017-10-01 2017-10-01 00:00:00 Beijing 64 64 true 64 64 64.64 64.64 char64 64 64 -65 2017-10-01 2017-10-01 00:00:00 Beijing 65 65 true 65 65 65.65 65.65 char65 65 65 -66 2017-10-01 2017-10-01 00:00:00 Beijing 66 66 true 66 66 66.66 66.66 char66 66 66 -67 2017-10-01 2017-10-01 00:00:00 Beijing 67 67 true 67 67 67.67 67.67 char67 67 67 -68 2017-10-01 2017-10-01 00:00:00 Beijing 68 68 true 68 68 68.68 68.68 char68 68 68 -69 2017-10-01 2017-10-01 00:00:00 Beijing 69 69 true 69 69 69.69 69.69 char69 69 69 - diff --git a/regression-test/data/external_table_p0/jdbc/test_oracle_jdbc_catalog.out b/regression-test/data/external_table_p0/jdbc/test_oracle_jdbc_catalog.out index 0545ddafac5c00..519c7ab131cd98 100644 --- a/regression-test/data/external_table_p0/jdbc/test_oracle_jdbc_catalog.out +++ b/regression-test/data/external_table_p0/jdbc/test_oracle_jdbc_catalog.out @@ -187,15 +187,6 @@ DORIS_TEST 3 jerry 23 88.0 4 andy 21 93.0 --- !lower_case_column_names1 -- -1 doris 3 1.0 - --- !lower_case_column_names2 -- -1 doris 3 1.0 - --- !lower_case_column_names3 -- -1 doris 3 1.0 - -- !query_clob -- 10010 liantong 10086 yidong diff --git a/regression-test/data/nereids_p0/sql_functions/window_functions/test_window_function.out b/regression-test/data/nereids_p0/sql_functions/window_functions/test_window_function.out index 44dbf7c998d1fd..e06d5a30ddc867 100644 --- a/regression-test/data/nereids_p0/sql_functions/window_functions/test_window_function.out +++ b/regression-test/data/nereids_p0/sql_functions/window_functions/test_window_function.out @@ -1,4 +1,13 @@ -- This file is automatically generated. You should know what you did if you want to edit this +-- !sql -- +1 +2 +3 +4 +5 +6 +7 + -- !sql -- JDR 2014-10-02T00:00 12.86 12.875 JDR 2014-10-03T00:00 12.89 12.896666667 diff --git a/regression-test/data/schema_change_p0/test_alter_parition.out b/regression-test/data/schema_change_p0/test_alter_parition.out deleted file mode 100644 index cad31310640874..00000000000000 --- a/regression-test/data/schema_change_p0/test_alter_parition.out +++ /dev/null @@ -1,6 +0,0 @@ --- This file is automatically generated. You should know what you did if you want to edit this --- !select -- -1 2017-01-01 Beijing 10 1 1 30 20 \N \N -2 2017-02-01 Beijing 10 1 1 31 19 \N \N -3 2017-03-01 Beijing 10 1 1 31 21 \N \N - diff --git a/regression-test/data/schema_change_p0/test_alter_table_add_columns.out b/regression-test/data/schema_change_p0/test_alter_table_add_columns.out deleted file mode 100644 index 809993434adc3b..00000000000000 --- a/regression-test/data/schema_change_p0/test_alter_table_add_columns.out +++ /dev/null @@ -1,40 +0,0 @@ --- This file is automatically generated. 
You should know what you did if you want to edit this --- !order -- -1 1 xxx 1 -2 2 xxx 2 -3 3 xxx 3 - --- !order -- -1 1 xxx 1 1 2 -2 2 xxx 2 1 2 -3 3 xxx 3 1 2 - --- !order -- -1 1 xxx 1 1 2 -2 2 xxx 2 1 2 -3 3 xxx 3 1 2 -4 4 yyy 4 4 4 -5 5 yyy 5 5 5 -6 6 yyy 6 6 6 - --- !sql -- -siteid INT Yes true 10 -citycode SMALLINT Yes true \N -username VARCHAR(32) Yes true test -new_k1 INT Yes true 1 -new_k2 INT Yes true 2 -pv BIGINT Yes false 0 SUM -new_v1 INT Yes false 1 MAX -new_v2 INT Yes false 2 MAX - --- !order -- -1 1 xxx 1 2 1 1 2 -2 2 xxx 1 2 2 1 2 -3 3 xxx 1 2 3 1 2 -4 4 yyy 1 2 4 4 4 -5 5 yyy 1 2 5 5 5 -6 6 yyy 1 2 6 6 6 -7 7 zzz 7 7 7 7 7 -8 8 zzz 8 8 8 8 8 -9 9 zzz 9 9 9 9 9 - diff --git a/regression-test/data/schema_change_p0/test_alter_table_drop_column.out b/regression-test/data/schema_change_p0/test_alter_table_drop_column.out deleted file mode 100644 index 4da609439977ec..00000000000000 --- a/regression-test/data/schema_change_p0/test_alter_table_drop_column.out +++ /dev/null @@ -1,41 +0,0 @@ --- This file is automatically generated. You should know what you did if you want to edit this --- !order -- -1 1 xxx 1 -2 2 xxx 2 -3 3 xxx 3 - --- !order -- -1 1 xxx 1 -2 2 xxx 2 -3 3 xxx 3 - --- !order -- -1 1 xxx 1 -2 2 xxx 2 -3 3 xxx 3 - --- !order -- -1 1 xxx 1 -2 2 xxx 2 -3 3 xxx 3 - --- !order -- -1 1 xxx 1 -2 2 xxx 2 -3 3 xxx 3 - --- !order -- -1 1 xxx 1 -2 2 xxx 2 -3 3 xxx 3 - --- !order -- -1 1 xxx 1 -2 2 xxx 2 -3 3 xxx 3 - --- !order -- -1 1 xxx 1 -2 2 xxx 2 -3 3 xxx 3 - diff --git a/regression-test/data/schema_change_p0/test_alter_table_modify_column.out b/regression-test/data/schema_change_p0/test_alter_table_modify_column.out deleted file mode 100644 index 947717f4adf506..00000000000000 --- a/regression-test/data/schema_change_p0/test_alter_table_modify_column.out +++ /dev/null @@ -1,29 +0,0 @@ --- This file is automatically generated. You should know what you did if you want to edit this --- !order -- -1 1 xxx 1 -2 2 xxx 2 -3 3 xxx 3 - --- !order -- -1 1 xxx 1 -2 2 xxx 2 -3 3 xxx 3 - --- !order -- -1 1 xxx 1 -2 2 xxx 2 -3 3 xxx 3 - --- !order -- -xxx 1 1 1 -xxx 2 2 2 -xxx 3 3 3 -yyy 4 4 4 - --- !order -- -1 1 xxx 1 -2 2 xxx 2 -3 3 xxx 3 -4 4 yyy 4 -5 5 zzz 5 - diff --git a/regression-test/data/schema_change_p0/test_alter_table_replace.out b/regression-test/data/schema_change_p0/test_alter_table_replace.out deleted file mode 100644 index 40698fdd537f25..00000000000000 --- a/regression-test/data/schema_change_p0/test_alter_table_replace.out +++ /dev/null @@ -1,26 +0,0 @@ --- This file is automatically generated. 
You should know what you did if you want to edit this --- !select -- -1 2017-01-01 Beijing 10 1 1 30 20 \N \N -2 2017-02-01 Beijing 10 1 1 31 19 \N \N -3 2017-03-01 Beijing 10 1 1 31 21 \N \N - --- !select -- -4 2017-01-01 Beijing 10 1 1 30 20 \N \N -5 2017-02-01 Beijing 10 1 1 31 19 \N \N -6 2017-03-01 Beijing 10 1 1 31 21 \N \N - --- !select -- -4 2017-01-01 Beijing 10 1 1 30 20 \N \N -5 2017-02-01 Beijing 10 1 1 31 19 \N \N -6 2017-03-01 Beijing 10 1 1 31 21 \N \N - --- !select -- -1 2017-01-01 Beijing 10 1 1 30 20 \N \N -2 2017-02-01 Beijing 10 1 1 31 19 \N \N -3 2017-03-01 Beijing 10 1 1 31 21 \N \N - --- !select -- -1 2017-01-01 Beijing 10 1 1 30 20 \N \N -2 2017-02-01 Beijing 10 1 1 31 19 \N \N -3 2017-03-01 Beijing 10 1 1 31 21 \N \N - diff --git a/regression-test/suites/correctness_p0/test_distinct_agg.groovy b/regression-test/suites/correctness_p0/test_distinct_agg.groovy index 788f5271a6cedc..d69842a5fe6d6b 100644 --- a/regression-test/suites/correctness_p0/test_distinct_agg.groovy +++ b/regression-test/suites/correctness_p0/test_distinct_agg.groovy @@ -56,7 +56,7 @@ suite("test_distinct_agg") { result([['1', '2023-01-10', 1L]]) } - sql '''SELECT `b`.`dt` AS `dt` + qt_distinct_1 '''SELECT `b`.`dt` AS `dt` FROM (SELECT `dt`AS `dt`, count(DISTINCT `role_id`) AS `pay_role`, @@ -117,4 +117,46 @@ suite("test_distinct_agg") { sql 'drop view if exists dim_v2' sql 'drop view if exists dim_v3' sql 'drop table if exists test_distinct_agg_t' + + sql "drop table if exists multi_distinct_agg_tab;" + + sql """ + CREATE TABLE `multi_distinct_agg_tab` ( + `k1` bigint(20) NULL, + `k2` varchar(20) NULL, + `d1` DECIMAL(18, 0) NULL, + `d2` DECIMAL(38, 0) NULL + ) ENGINE = OLAP DUPLICATE KEY(`k1`) DISTRIBUTED BY HASH(`k1`) BUCKETS 2 PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ); + """ + + sql """ + INSERT INTO + `multi_distinct_agg_tab` (`k1`, `k2`, `d1`, `d2`) + VALUES (1, 'aaa', 123, 356),(2, 'bbb', 123, 789), (3, 'ccc', 246, 789); + """ + sql "sync" + + qt_multi_distinct_1 """ + select + count(distinct d1), + count(distinct d2) + from + multi_distinct_agg_tab; + """ + qt_multi_distinct_2 """ + select + sum(distinct d1), + sum(distinct d2) + from + multi_distinct_agg_tab; + """ + qt_multi_distinct_3 """ + select + avg(distinct d1), + avg(distinct d2) + from + multi_distinct_agg_tab; + """ } \ No newline at end of file diff --git a/regression-test/suites/correctness_p0/test_inlineview_with_window_function.groovy b/regression-test/suites/correctness_p0/test_inlineview_with_window_function.groovy index cdeb65e9ec5939..359874f52d03c3 100644 --- a/regression-test/suites/correctness_p0/test_inlineview_with_window_function.groovy +++ b/regression-test/suites/correctness_p0/test_inlineview_with_window_function.groovy @@ -16,6 +16,7 @@ // under the License. 
suite("test_inlineview_with_window_function") { + sql """set enable_nereids_planner=false;""" sql """ drop view if exists test_table_aa; """ @@ -151,4 +152,19 @@ suite("test_inlineview_with_window_function") { group by ordernum )tmp1 on tmp.ordernum=tmp1.ordernum;""" + + qt_order2 """ + SELECT + row_number() over(partition by add_date order by pc_num desc) + ,row_number() over(partition by add_date order by vc_num desc) + ,row_number() over(partition by add_date order by vt_num desc) + FROM ( + SELECT + cast(dnt as datev2) add_date + ,row_number() over(order by dnt) pc_num + ,row_number() over(order by dnt) vc_num + ,row_number() over(order by dnt) vt_num + FROM test_table_aaa + ) t; + """ } diff --git a/regression-test/suites/export_p0/export/test_show_export.groovy b/regression-test/suites/export_p0/export/test_show_export.groovy deleted file mode 100644 index 51e186dcb6ef3d..00000000000000 --- a/regression-test/suites/export_p0/export/test_show_export.groovy +++ /dev/null @@ -1,228 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -import org.codehaus.groovy.runtime.IOGroovyMethods - -import java.nio.charset.StandardCharsets -import java.nio.file.Files -import java.nio.file.Paths - -suite("test_show_export", "p0") { - // open nereids - sql """ set enable_nereids_planner=true """ - sql """ set enable_fallback_to_original_planner=false """ - - - String ak = getS3AK() - String sk = getS3SK() - String s3_endpoint = getS3Endpoint() - String region = getS3Region() - String bucket = context.config.otherConfigs.get("s3BucketName"); - - - def thisDb = "regression_test_export_p0_export" - def table_export_name = "test_show_export" - def table_load_name = "test_show_export_load_back" - def outfile_path_prefix = """${bucket}/export/test_show_export/exp_""" - - // create table and insert - sql """ DROP TABLE IF EXISTS ${table_export_name} """ - sql """ - CREATE TABLE IF NOT EXISTS ${table_export_name} ( - `user_id` INT NOT NULL COMMENT "用户id", - `date` DATE NOT NULL COMMENT "数据灌入日期时间", - `datetime` DATETIME NOT NULL COMMENT "数据灌入日期时间", - `city` VARCHAR(20) COMMENT "用户所在城市", - `age` SMALLINT COMMENT "用户年龄", - `sex` TINYINT COMMENT "用户性别", - `bool_col` boolean COMMENT "", - `int_col` int COMMENT "", - `bigint_col` bigint COMMENT "", - `largeint_col` largeint COMMENT "", - `float_col` float COMMENT "", - `double_col` double COMMENT "", - `char_col` CHAR(10) COMMENT "", - `decimal_col` decimal COMMENT "" - ) - DISTRIBUTED BY HASH(user_id) PROPERTIES("replication_num" = "1"); - """ - StringBuilder sb = new StringBuilder() - int i = 1 - for (; i < 100; i ++) { - sb.append(""" - (${i}, '2017-10-01', '2017-10-01 00:00:00', 'Beijing', ${i}, ${i % 128}, true, ${i}, ${i}, ${i}, ${i}.${i}, ${i}.${i}, 'char${i}', ${i}), - """) - } - sb.append(""" - (${i}, '2017-10-01', '2017-10-01 00:00:00', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) - """) - sql """ INSERT INTO ${table_export_name} VALUES - ${sb.toString()} - """ - def insert_res = sql "show last insert;" - logger.info("insert result: " + insert_res.toString()) - order_qt_select_export1 """ SELECT * FROM ${table_export_name} t ORDER BY user_id; """ - - - def waiting_export = { export_label -> - while (true) { - def res = sql """ show export where label = "${export_label}";""" - logger.info("export state: " + res[0][2]) - if (res[0][2] == "FINISHED") { - def json = parseJson(res[0][11]) - assert json instanceof List - assertEquals("1", json.fileNumber[0][0]) - log.info("outfile_path: ${json.url[0][0]}") - return json.url[0][0]; - } else if (res[0][2] == "CANCELLED") { - throw new IllegalStateException("""export failed: ${res[0][10]}""") - } else { - sleep(5000) - } - } - } - - // 1. 
exec export - def uuid = UUID.randomUUID().toString() - def outFilePath = """${outfile_path_prefix}_${uuid}""" - def label = "label_${uuid}" - try { - // exec export - sql """ - EXPORT TABLE ${table_export_name} TO "s3://${outFilePath}/" - PROPERTIES( - "label" = "${label}", - "format" = "parquet", - 'columns' = 'user_id, date, datetime, city, age, sex, bool_col, int_col, bigint_col, float_col, double_col, char_col, decimal_col, largeint_col' - ) - WITH S3( - "s3.endpoint" = "${s3_endpoint}", - "s3.region" = "${region}", - "s3.secret_key"="${sk}", - "s3.access_key" = "${ak}" - ); - """ - def outfile_url = waiting_export.call(label) - - order_qt_select_load1 """ select * from s3( - "uri" = "http://${s3_endpoint}${outfile_url.substring(4)}0.parquet", - "s3.access_key"= "${ak}", - "s3.secret_key" = "${sk}", - "format" = "parquet", - "region" = "${region}" - ) ORDER BY user_id; - """ - - } finally { - } - - - // 2. exec export - uuid = UUID.randomUUID().toString() - outFilePath = """${outfile_path_prefix}_${uuid}""" - label = "label_${uuid}" - try { - // exec export - sql """ - EXPORT TABLE ${table_export_name} where user_id < 70 and user_id >= 20 TO "s3://${outFilePath}/" - PROPERTIES( - "label" = "${label}", - "format" = "parquet", - 'columns' = 'user_id, date, datetime, city, age, sex, bool_col, int_col, bigint_col, float_col, double_col, char_col, decimal_col, largeint_col' - ) - WITH S3( - "s3.endpoint" = "${s3_endpoint}", - "s3.region" = "${region}", - "s3.secret_key"="${sk}", - "s3.access_key" = "${ak}" - ); - """ - def outfile_url = waiting_export.call(label) - - order_qt_select_load1 """ select * from s3( - "uri" = "http://${s3_endpoint}${outfile_url.substring(4)}0.parquet", - "s3.access_key"= "${ak}", - "s3.secret_key" = "${sk}", - "format" = "parquet", - "region" = "${region}" - ) ORDER BY user_id; - """ - - } finally { - } - - // test show export - def show_export_res1 = sql_return_maparray "show export;" - assertEquals(2, show_export_res1.size()) - - // test: show proc - def show_proc_jobs = sql_return_maparray """show proc "/jobs";""" - def dbId = "" - for (def row : show_proc_jobs) { - if (row.DbName == "default_cluster:regression_test_export_p0_export") { - dbId = row.DbId - break - } - } - // test: show proc "/jobs/${dbId}"" - def show_proc_jobs_DB = sql_return_maparray """show proc "/jobs/${dbId}";""" - for (def row : show_proc_jobs) { - if (row.JobType == "export") { - assertEquals(2, row.Finished) - assertEquals(2, row.Total) - break - } - } - // test: show proc "/jobs/${dbId}/export" - def show_proc_jobs_dbid = sql_return_maparray """show proc "/jobs/${dbId}/export";""" - assertEquals(2, show_proc_jobs_dbid.size()) - - def show_export_res2 = sql_return_maparray "show export from ${thisDb};" - assertEquals(2, show_export_res2.size()) - - def show_export_res3 = sql_return_maparray "show export from ${thisDb} order by JobId;" - assertEquals(2, show_export_res3.size()) - - def show_export_res4 = sql_return_maparray "show export from ${thisDb} order by JobId, State;" - assertEquals(2, show_export_res4.size()) - - def show_export_res5 = sql_return_maparray "show export from ${thisDb} order by JobId, State limit 1" - assertEquals(1, show_export_res5.size()) - - def show_export_res6 = sql_return_maparray "show export from ${thisDb} order by JobId, State limit 1" - assertEquals(1, show_export_res6.size()) - - def jobId = show_export_res6[0].JobId - def show_export_res7 = sql_return_maparray "show export from ${thisDb} where Id = ${jobId} order by JobId, State" - assertEquals(1, 
show_export_res7.size()) - - // test where label like - def show_export_label_like = sql_return_maparray """show export from ${thisDb} where Label like "%${uuid}" """ - def show_export_label_eq = sql_return_maparray """show export from ${thisDb} where Label = "${label}" """ - assertEquals(show_export_label_like[0].JobId, show_export_label_eq[0].JobId) - - def show_export_res8 = sql_return_maparray """show export from ${thisDb} where STATE = "FINISHED" """ - assertEquals(2, show_export_res8.size()) - - // test invalid where - test { - sql "show export from ${thisDb} where Progress = 100" - // check exception - exception """Where clause should looks like below""" - } - -} diff --git a/regression-test/suites/external_table_p0/jdbc/test_oracle_jdbc_catalog.groovy b/regression-test/suites/external_table_p0/jdbc/test_oracle_jdbc_catalog.groovy index f3463763b87289..e762f2f4aaf41f 100644 --- a/regression-test/suites/external_table_p0/jdbc/test_oracle_jdbc_catalog.groovy +++ b/regression-test/suites/external_table_p0/jdbc/test_oracle_jdbc_catalog.groovy @@ -214,9 +214,6 @@ suite("test_oracle_jdbc_catalog", "p0,external,oracle,external_docker,external_d // test lower case name order_qt_lower_case_table_names4 """ select * from student2 order by id; """ - order_qt_lower_case_column_names1 """ select * from student3 order by id; """ - order_qt_lower_case_column_names2 """ select * from student3 where id = 1 order by id; """ - order_qt_lower_case_column_names3 """ select * from student3 where id = 1 and name = 'doris' order by id; """ sql """drop catalog if exists ${catalog_name} """ diff --git a/regression-test/suites/nereids_p0/group_concat/test_group_concat.groovy b/regression-test/suites/nereids_p0/group_concat/test_group_concat.groovy index a570ac3da165ed..ecb4126afa1d12 100644 --- a/regression-test/suites/nereids_p0/group_concat/test_group_concat.groovy +++ b/regression-test/suites/nereids_p0/group_concat/test_group_concat.groovy @@ -115,7 +115,7 @@ suite("test_group_concat") { """ sql """create view if not exists test_view as SELECT b1, group_concat(cast(abs(b3) as varchar) order by abs(b2) desc, b3 desc) FROM table_group_concat group by b1 order by b1;""" - qt_select_group_concat_order_by_desc4 """ + order_qt_select_group_concat_order_by_desc4 """ select * from test_view; """ sql """drop view if exists test_view""" diff --git a/regression-test/suites/nereids_p0/sql_functions/window_functions/test_window_function.groovy b/regression-test/suites/nereids_p0/sql_functions/window_functions/test_window_function.groovy index d17dd8350e5234..26b770e8687ed1 100644 --- a/regression-test/suites/nereids_p0/sql_functions/window_functions/test_window_function.groovy +++ b/regression-test/suites/nereids_p0/sql_functions/window_functions/test_window_function.groovy @@ -24,6 +24,7 @@ suite("test_window_function") { sql """ INSERT INTO ${windowFunctionTable1} VALUES ('JDR',12.86,'2014-10-02 00:00:00','2014-10-02 00:00:00.111111','2014-10-02 00:00:00.111111','2014-10-02 00:00:00.111111'),('JDR',12.89,'2014-10-03 00:00:00','2014-10-03 00:00:00.111111','2014-10-03 00:00:00.111111','2014-10-03 00:00:00.111111'),('JDR',12.94,'2014-10-04 00:00:00','2014-10-04 00:00:00.111111','2014-10-04 00:00:00.111111','2014-10-04 00:00:00.111111'),('JDR',12.55,'2014-10-05 00:00:00','2014-10-05 00:00:00.111111','2014-10-05 00:00:00.111111','2014-10-05 00:00:00.111111'),('JDR',14.03,'2014-10-06 00:00:00','2014-10-06 00:00:00.111111','2014-10-06 00:00:00.111111','2014-10-06 00:00:00.111111'),('JDR',14.75,'2014-10-07 
00:00:00','2014-10-07 00:00:00.111111','2014-10-07 00:00:00.111111','2014-10-07 00:00:00.111111'),('JDR',13.98,'2014-10-08 00:00:00','2014-10-08 00:00:00.111111','2014-10-08 00:00:00.111111','2014-10-08 00:00:00.111111') """ + qt_sql """SELECT row_number() OVER (partition by 1 order by 2) from ${windowFunctionTable1} order by 1; """ // Nereids doesn't support window function // qt_sql """ // SELECT /*+SET_VAR(parallel_fragment_exec_instance_num=1) */ diff --git a/regression-test/suites/nereids_rules_p0/partition_prune/test_partition_unique_model.groovy b/regression-test/suites/nereids_rules_p0/partition_prune/test_partition_unique_model.groovy new file mode 100644 index 00000000000000..3fc77f2cf9cd20 --- /dev/null +++ b/regression-test/suites/nereids_rules_p0/partition_prune/test_partition_unique_model.groovy @@ -0,0 +1,58 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_partition_unique_model") { + // a filter on the invisible column "DORIS_DELETE_SIGN = 0" has no impact on partition pruning + String db = context.config.getDbNameByFile(context.file) + sql "use ${db}" + sql "SET enable_nereids_planner=true" + sql "SET enable_fallback_to_original_planner=false" + sql "set partition_pruning_expand_threshold=10;" + sql "drop table if exists xinan;" + sql """ + create table xinan + ( + init_date int null + ) + engine=olap + unique key(init_date) + partition by range(init_date) + (partition p202209 values[("20220901"), ("20221001")), + partition p202210 values[("20221001"), ("20221101"))) + distributed by hash (init_date) buckets auto + properties( + "replication_allocation" = "tag.location.default: 1" + ); + """ + sql "insert into xinan values(20220901), (20221003);" + + explain { + sql "select * from xinan where init_date >=20221001;" + contains "partitions=1/2" + } + + explain { + sql "select * from xinan where init_date<20221101;" + contains "partitions=2/2" + } + + explain { + sql "select * from xinan where init_date >=20221001 and init_date<20221101;" + contains "partitions=1/2" + } + +} \ No newline at end of file diff --git a/regression-test/suites/schema_change_p0/test_alter_parition.groovy b/regression-test/suites/schema_change_p0/test_alter_parition.groovy deleted file mode 100644 index 98439c007e320a..00000000000000 --- a/regression-test/suites/schema_change_p0/test_alter_parition.groovy +++ /dev/null @@ -1,65 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License.
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -suite("test_alter_partition") { - def tbName = "test_alter_partition" - - sql "DROP TABLE IF EXISTS ${tbName} FORCE" - sql """ - CREATE TABLE ${tbName} ( - `user_id` LARGEINT NOT NULL COMMENT "用户id", - `date` DATE NOT NULL COMMENT "数据灌入日期时间", - `city` VARCHAR(20) COMMENT "用户所在城市", - `age` SMALLINT COMMENT "用户年龄", - `sex` TINYINT COMMENT "用户性别", - - `cost` BIGINT SUM DEFAULT "0" COMMENT "用户总消费", - `max_dwell_time` INT MAX DEFAULT "0" COMMENT "用户最大停留时间", - `min_dwell_time` INT MIN DEFAULT "99999" COMMENT "用户最小停留时间", - `hll_col` HLL HLL_UNION NOT NULL COMMENT "HLL列", - `bitmap_col` Bitmap BITMAP_UNION NOT NULL COMMENT "bitmap列") - AGGREGATE KEY(`user_id`, `date`, `city`, `age`, `sex`) - PARTITION BY RANGE(`date`) - ( - PARTITION `p201701` VALUES LESS THAN ("2017-02-01"), - PARTITION `p201702` VALUES LESS THAN ("2017-03-01"), - PARTITION `p201703` VALUES LESS THAN ("2017-04-01") - ) - DISTRIBUTED BY HASH(`user_id`) - BUCKETS 1 - PROPERTIES ( - "replication_num" = "1" - ); - ; - """ - - sql """ INSERT INTO ${tbName} VALUES - (1, '2017-01-01', 'Beijing', 10, 1, 1, 30, 20, hll_hash(1), to_bitmap(1)), - (2, '2017-02-01', 'Beijing', 10, 1, 1, 31, 19, hll_hash(2), to_bitmap(2)), - (3, '2017-03-01', 'Beijing', 10, 1, 1, 31, 21, hll_hash(2), to_bitmap(2)) - """ - - qt_select """ select * from ${tbName} order by user_id""" - - // modify in_memory property - // https://github.com/apache/doris/pull/18731 - test { - sql """ALTER TABLE ${tbName} MODIFY PARTITION p201701 SET ("in_memory" = "true");""" - exception "Not support set 'in_memory'='true' now!" - } - sql "DROP TABLE IF EXISTS ${tbName} FORCE" -} \ No newline at end of file diff --git a/regression-test/suites/schema_change_p0/test_alter_table_add_columns.groovy b/regression-test/suites/schema_change_p0/test_alter_table_add_columns.groovy deleted file mode 100644 index 33484f95bf882b..00000000000000 --- a/regression-test/suites/schema_change_p0/test_alter_table_add_columns.groovy +++ /dev/null @@ -1,88 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
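The deleted test_alter_table_add_columns suite below covered the basic add-column contract: after ALTER TABLE ... ADD COLUMN with defaults, rows written before the change read back with the default values filled in (visible in the removed .out file, where row (1, 1, xxx, 1) becomes (1, 1, xxx, 1, 1, 2)). A minimal sketch of that contract, with t standing in for the aggregate-key table the suite created:

    alter table t add column (new_v1 int max default "1", new_v2 int max default "2");
    -- rows that existed before the change now read back with the defaults appended:
    select * from t order by siteid;  -- e.g. (1, 1, 'xxx', 1, 1, 2)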
- -suite("test_alter_table_add_columns") { - def tbName = "test_alter_table_add_columns" - - sql "DROP TABLE IF EXISTS ${tbName} FORCE" - sql """ - CREATE TABLE `${tbName}` - ( - `siteid` INT DEFAULT '10', - `citycode` SMALLINT, - `username` VARCHAR(32) DEFAULT 'test', - `pv` BIGINT SUM DEFAULT '0' - ) - AGGREGATE KEY(`siteid`, `citycode`, `username`) - DISTRIBUTED BY HASH(siteid) BUCKETS 1 - PROPERTIES ( - "replication_num" = "1" - ); - """ - - sql """ INSERT INTO ${tbName} VALUES - (1, 1, "xxx", 1), - (2, 2, "xxx", 2), - (3, 3, "xxx", 3); - """ - - qt_order """ select * from ${tbName} order by siteid""" - - // Add two value column light schema change is true - sql """ alter table ${tbName} ADD COLUMN (new_v1 INT MAX default "1" , new_v2 INT MAX default "2");""" - - qt_order """ select * from ${tbName} order by siteid""" - - sql """ INSERT INTO ${tbName} VALUES - (4, 4, "yyy", 4, 4, 4), - (5, 5, "yyy", 5, 5, 5), - (6, 6, "yyy", 6, 6, 6); - """ - - qt_order """ select * from ${tbName} order by siteid""" - - // Add one value column light schema change is false - sleep(1000) - sql """ alter table ${tbName} ADD COLUMN (new_k1 INT DEFAULT '1', new_k2 INT DEFAULT '2');""" - def waitSchemaChangeJob = { String tableName /* param */ -> - int tryTimes = 30 - while (tryTimes-- > 0) { - def jobResult = sql """SHOW ALTER TABLE COLUMN WHERE IndexName='${tableName}' ORDER BY createtime DESC LIMIT 1 """ - def jobState = jobResult[0][9].toString() - if ('cancelled'.equalsIgnoreCase(jobState)) { - logger.info("jobResult:{}", jobResult) - throw new IllegalStateException("${tableName}'s job has been cancelled") - } - if ('finished'.equalsIgnoreCase(jobState)) { - logger.info("jobResult:{}", jobResult) - break - } - sleep(10000) - } - } - - waitSchemaChangeJob(tbName) - qt_sql """ DESC ${tbName}""" - - sql """ INSERT INTO ${tbName} VALUES - (7, 7, "zzz", 7, 7, 7, 7, 7), - (8, 8, "zzz", 8, 8, 8, 8, 8), - (9, 9, "zzz", 9, 9, 9, 9, 9); - """ - qt_order """ select * from ${tbName} order by siteid""" - sql "DROP TABLE IF EXISTS ${tbName} FORCE" -} \ No newline at end of file diff --git a/regression-test/suites/schema_change_p0/test_alter_table_drop_column.groovy b/regression-test/suites/schema_change_p0/test_alter_table_drop_column.groovy deleted file mode 100644 index 433cbadcddf435..00000000000000 --- a/regression-test/suites/schema_change_p0/test_alter_table_drop_column.groovy +++ /dev/null @@ -1,176 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -suite("test_alter_table_drop_column") { - // unique model table - def uniqueTableName = "test_alter_table_drop_column_unique" - def uniqueTableRollupName = "test_alter_table_drop_column_rollup_unique" - - sql "DROP TABLE IF EXISTS ${uniqueTableName} FORCE" - sql """ - CREATE TABLE `${uniqueTableName}` - ( - `siteid` INT DEFAULT '10', - `citycode` SMALLINT, - `username` VARCHAR(32) DEFAULT 'test', - `pv` BIGINT DEFAULT '0' - ) - UNIQUE KEY(`siteid`, `citycode`, `username`) - DISTRIBUTED BY HASH(siteid) BUCKETS 1 - PROPERTIES ( - "replication_num" = "1", - "bloom_filter_columns" = "pv" - ); - """ - - sql "ALTER TABLE ${uniqueTableName} ADD ROLLUP ${uniqueTableRollupName}(`citycode`,`siteid`,`username`,`pv`);" - def waitRollupJob = { String tableName /* param */ -> - int tryTimes = 30 - while (tryTimes-- > 0) { - def jobResult = sql """SHOW ALTER TABLE ROLLUP WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1""" - def jobState = jobResult[0][8].toString() - if ('cancelled'.equalsIgnoreCase(jobState)) { - logger.info("jobResult:{}", jobResult) - throw new IllegalStateException("${tableName}'s job has been cancelled") - } - if ('finished'.equalsIgnoreCase(jobState)) { - logger.info("jobResult:{}", jobResult) - return; - } - sleep(10000) - } - assertTrue(false) - } - - waitRollupJob(uniqueTableName) - - sql """ INSERT INTO ${uniqueTableName} VALUES - (1, 1, "xxx", 1), - (2, 2, "xxx", 2), - (3, 3, "xxx", 3); - """ - - qt_order """ select * from ${uniqueTableName} order by siteid""" - qt_order """ select * from ${uniqueTableName} order by citycode""" - - test { - sql """ alter table ${uniqueTableName} drop COLUMN siteid;""" - // check exception message contains - exception "Can not drop key column in Unique data model table" - } - - def waitSchemaChangeJob = { String tableName /* param */ -> - int tryTimes = 30 - while (tryTimes-- > 0) { - def jobResult = sql """SHOW ALTER TABLE COLUMN WHERE IndexName='${tableName}' ORDER BY createtime DESC LIMIT 1 """ - def jobState = jobResult[0][9].toString() - if ('cancelled'.equalsIgnoreCase(jobState)) { - logger.info("jobResult:{}", jobResult) - throw new IllegalStateException("${tableName}'s job has been cancelled") - } - if ('finished'.equalsIgnoreCase(jobState)) { - logger.info("jobResult:{}", jobResult) - return - } - sleep(10000) - } - assertTrue(false) - } - - sql """ alter table ${uniqueTableName} drop COLUMN pv from ${uniqueTableRollupName};""" - waitSchemaChangeJob(uniqueTableName) - - qt_order """ select * from ${uniqueTableName} order by siteid""" - qt_order """ select * from ${uniqueTableName} order by citycode""" - - sql "DROP TABLE IF EXISTS ${uniqueTableName} FORCE" - - // aggregage model table - def aggTableName = "test_alter_table_drop_column_agg" - def aggTableRollupName = "test_alter_table_drop_column_rollup_agg" - - sql "DROP TABLE IF EXISTS ${aggTableName} FORCE" - sql """ - CREATE TABLE `${aggTableName}` - ( - `siteid` INT DEFAULT '10', - `citycode` SMALLINT, - `username` VARCHAR(32) DEFAULT 'test', - `pv` BIGINT REPLACE DEFAULT '0' - ) - AGGREGATE KEY(`siteid`, `citycode`, `username`) - DISTRIBUTED BY HASH(siteid) BUCKETS 1 - PROPERTIES ( - "replication_num" = "1" - ); - """ - sql "ALTER TABLE ${aggTableName} ADD ROLLUP ${aggTableRollupName}(`citycode`,`siteid`,`username`,`pv`);" - waitRollupJob(aggTableName) - sql """ INSERT INTO ${aggTableName} VALUES - (1, 1, "xxx", 1), - (2, 2, "xxx", 2), - (3, 3, "xxx", 3); - """ - - qt_order """ select * from ${aggTableName} order by siteid""" - qt_order """ select * from 
${aggTableName} order by citycode""" - - test { - sql """ alter table ${aggTableName} drop COLUMN citycode from ${aggTableRollupName};""" - // check exception message contains - exception "Can not drop key column when rollup has value column with REPLACE aggregation method" - } - - sql """ alter table ${aggTableName} drop COLUMN pv from ${aggTableRollupName};""" - waitSchemaChangeJob(aggTableName) - - qt_order """ select * from ${aggTableName} order by siteid""" - qt_order """ select * from ${aggTableName} order by citycode""" - - test { - sql """ alter table ${aggTableName} drop COLUMN pv from ${aggTableRollupName};""" - // check exception message contains - exception "Column does not exists" - } - - // duplicate model table - def dupTableName = "test_alter_table_drop_column_dup" - - sql "DROP TABLE IF EXISTS ${dupTableName} FORCE" - sql """ - CREATE TABLE `${dupTableName}` - ( - `siteid` INT DEFAULT '10', - `citycode` SMALLINT, - `username` VARCHAR(32) DEFAULT 'test', - `pv` BIGINT SUM DEFAULT '0' - ) - DUPLICATE KEY(`siteid`, `citycode`, `username`) - DISTRIBUTED BY HASH(siteid) BUCKETS 1 - PROPERTIES ( - "replication_num" = "1" - ); - """ - - test { - sql """alter table ${dupTableName} drop COLUMN siteid;""" - // check exception message contains - exception "Distribution column[siteid] cannot be dropped" - } - - sql "DROP TABLE IF EXISTS ${dupTableName} FORCE" -} \ No newline at end of file diff --git a/regression-test/suites/schema_change_p0/test_alter_table_modify_column.groovy b/regression-test/suites/schema_change_p0/test_alter_table_modify_column.groovy deleted file mode 100644 index 8df8e9112290d8..00000000000000 --- a/regression-test/suites/schema_change_p0/test_alter_table_modify_column.groovy +++ /dev/null @@ -1,219 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -suite("test_alter_table_modify_column") { - def waitRollupJob = { String tableName /* param */ -> - int tryTimes = 30 - while (tryTimes-- > 0) { - def jobResult = sql """SHOW ALTER TABLE ROLLUP WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1""" - def jobState = jobResult[0][8].toString() - if ('cancelled'.equalsIgnoreCase(jobState)) { - logger.info("jobResult:{}", jobResult) - throw new IllegalStateException("${tableName}'s job has been cancelled") - } - if ('finished'.equalsIgnoreCase(jobState)) { - logger.info("jobResult:{}", jobResult) - return; - } - sleep(10000) - } - assertTrue(false) - } - - def waitSchemaChangeJob = { String tableName /* param */ -> - int tryTimes = 30 - while (tryTimes-- > 0) { - def jobResult = sql """SHOW ALTER TABLE COLUMN WHERE IndexName='${tableName}' ORDER BY createtime DESC LIMIT 1 """ - def jobState = jobResult[0][9].toString() - if ('cancelled'.equalsIgnoreCase(jobState)) { - logger.info("jobResult:{}", jobResult) - throw new IllegalStateException("${tableName}'s job has been cancelled") - } - if ('finished'.equalsIgnoreCase(jobState)) { - logger.info("jobResult:{}", jobResult) - return - } - sleep(10000) - } - assertTrue(false) - } - - // unique model table - def uniqueTableName = "test_alter_table_modify_column_unique" - - sql "DROP TABLE IF EXISTS ${uniqueTableName} FORCE;" - sql """ - CREATE TABLE `${uniqueTableName}` - ( - `siteid` INT DEFAULT '10', - `citycode` SMALLINT, - `username` VARCHAR(32) DEFAULT 'test', - `pv` BIGINT DEFAULT '0' - ) - UNIQUE KEY(`siteid`, `citycode`, `username`) - DISTRIBUTED BY HASH(siteid) BUCKETS 1 - PROPERTIES ( - "replication_num" = "1" - ); - """ - - sql """ INSERT INTO ${uniqueTableName} VALUES - (1, 1, "xxx", 1), - (2, 2, "xxx", 2), - (3, 3, "xxx", 3); - """ - - qt_order """select * from ${uniqueTableName} order by siteid""" - - test { - sql """alter table ${uniqueTableName} modify COLUMN siteid INT SUM DEFAULT '0';""" - // check exception message contains - exception "Can not assign aggregation method on column in Unique data model table" - } - - sql "DROP TABLE IF EXISTS ${uniqueTableName} FORCE" - - // aggregate model table - def aggTableName = "test_alter_table_modify_column_agg" - - sql "DROP TABLE IF EXISTS ${aggTableName} FORCE" - sql """ - CREATE TABLE `${aggTableName}` - ( - `siteid` INT DEFAULT '10', - `citycode` SMALLINT, - `username` VARCHAR(32) DEFAULT 'test', - `pv` BIGINT SUM DEFAULT '0' - ) - AGGREGATE KEY(`siteid`, `citycode`, `username`) - DISTRIBUTED BY HASH(siteid) BUCKETS 1 - PROPERTIES ( - "replication_num" = "1" - ); - """ - - sql """ INSERT INTO ${aggTableName} VALUES - (1, 1, "xxx", 1), - (2, 2, "xxx", 2), - (3, 3, "xxx", 3); - """ - - qt_order """select * from ${aggTableName} order by siteid""" - - test { - sql """alter table ${aggTableName} modify COLUMN siteid INT key SUM DEFAULT '0';""" - // check exception message contains - exception "Key column can not set aggregation type" - } - - test { - sql """alter table ${aggTableName} modify COLUMN pv BIGINT DEFAULT '0';""" - // check exception message contains - exception "Can not change aggregation typ" - } - - sql "DROP TABLE IF EXISTS ${aggTableName} FORCE" - - // duplicate model table - def dupTableName = "test_alter_table_modify_column_dup" - def dupTableRollupName = "test_alter_table_modify_column_dup_rollup" - - sql "DROP TABLE IF EXISTS ${dupTableName} FORCE" - sql """ - CREATE TABLE `${dupTableName}` - ( - `citycode` SMALLINT DEFAULT '10', - `siteid` INT DEFAULT '10', - `username` VARCHAR(32) DEFAULT 'test', - `pv` 
-        )
-        DUPLICATE KEY(`siteid`, `citycode`, `username`)
-        DISTRIBUTED BY HASH(siteid) BUCKETS 1
-        PROPERTIES (
-            "replication_num" = "1"
-        );
-    """
-
-    sql "ALTER TABLE ${dupTableName} ADD ROLLUP ${dupTableRollupName}(`siteid`,`citycode`,`username`,`pv`);"
-    waitRollupJob(dupTableName)
-
-    sql """ INSERT INTO ${dupTableName} VALUES
-        (1, 1, "xxx", 1),
-        (2, 2, "xxx", 2),
-        (3, 3, "xxx", 3);
-    """
-
-    qt_order """select * from ${dupTableName} order by siteid"""
-
-    test {
-        sql """alter table ${dupTableName} modify COLUMN siteid INT SUM DEFAULT '0';"""
-        // check exception message contains
-        exception "Can not assign aggregation method on column in Duplicate data model table"
-    }
-
-    test {
-        sql """alter table ${dupTableName} modify COLUMN siteid BIGINT from not_exist_rollup;"""
-        // check exception message contains
-        exception "Index[not_exist_rollup] does not exist in table"
-    }
-
-    test {
-        sql """alter table ${dupTableName} modify COLUMN not_exist_column BIGINT;"""
-        // check exception message contains
-        exception "Column[not_exist_column] does not exists"
-    }
-
-    test {
-        sql """alter table ${dupTableName} modify COLUMN not_exist_column BIGINT from ${dupTableRollupName};"""
-        // check exception message contains
-        exception "Do not need to specify index name when just modifying column type"
-    }
-
-    test {
-        sql """alter table ${dupTableName} modify COLUMN siteid BIGINT after not_exist_column;"""
-        // check exception message contains
-        exception "Column[not_exist_column] does not exists"
-    }
-
-    test {
-        sql """alter table ${dupTableName} modify COLUMN citycode BIGINT DEFAULT '10' first;"""
-        // check exception message contains
-        exception "Invalid column order. value should be after key"
-    }
-
-    test {
-        sql """alter table ${dupTableName} modify COLUMN siteid BIGINT key DEFAULT '10' first;"""
-        // check exception message contains
-        exception "Can not modify distribution column"
-    }
-
-    sql """alter table ${dupTableName} modify COLUMN username VARCHAR(32) key DEFAULT 'test' first;"""
-    waitSchemaChangeJob(dupTableName)
-
-    sql """ INSERT INTO ${dupTableName} VALUES
-        ("yyy", 4, 4, 4)
-    """
-    qt_order """select * from ${dupTableName} order by siteid"""
-
-    sql """alter table ${dupTableName} order by(citycode, siteid, username, pv);"""
-    waitSchemaChangeJob(dupTableName)
-    sql """ INSERT INTO ${dupTableName} VALUES
-        (5, 5, "zzz", 5)
-    """
-    qt_order """select * from ${dupTableName} order by siteid"""
-    sql "DROP TABLE IF EXISTS ${dupTableName} FORCE"
-}
\ No newline at end of file
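Both deleted suites poll SHOW ALTER TABLE output until the asynchronous alter job reaches a terminal state; the two closures differ only in the SHOW statement and in which result column carries the job state (index 8 for rollup jobs, index 9 for schema-change jobs). A sketch of how they could be collapsed into one helper; waitAlterJob is a hypothetical name, and sql, logger, sleep, and assertTrue are bindings provided by the regression-test framework, as in the deleted code:

    // Hypothetical consolidation of waitRollupJob / waitSchemaChangeJob.
    def waitAlterJob = { String showStmt, int stateColumn ->
        int tryTimes = 30
        while (tryTimes-- > 0) {
            def jobResult = sql showStmt
            def jobState = jobResult[0][stateColumn].toString()
            if ('cancelled'.equalsIgnoreCase(jobState)) {
                logger.info("jobResult:{}", jobResult)
                throw new IllegalStateException("job has been cancelled")
            }
            if ('finished'.equalsIgnoreCase(jobState)) {
                logger.info("jobResult:{}", jobResult)
                return
            }
            sleep(10000) // alter jobs are asynchronous; poll every 10 seconds
        }
        assertTrue(false) // give up after roughly five minutes
    }

    // Usage mirroring the deleted helpers (state column per job type):
    // waitAlterJob("SHOW ALTER TABLE ROLLUP WHERE TableName='t' ORDER BY CreateTime DESC LIMIT 1", 8)
    // waitAlterJob("SHOW ALTER TABLE COLUMN WHERE IndexName='t' ORDER BY createtime DESC LIMIT 1", 9)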
diff --git a/regression-test/suites/schema_change_p0/test_alter_table_replace.groovy b/regression-test/suites/schema_change_p0/test_alter_table_replace.groovy
deleted file mode 100644
index b07a54c528865a..00000000000000
--- a/regression-test/suites/schema_change_p0/test_alter_table_replace.groovy
+++ /dev/null
@@ -1,104 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-suite("test_alter_table_replace") {
-    def tbNameA = "test_alter_table_replace_a"
-    def tbNameB = "test_alter_table_replace_b"
-
-    sql "DROP TABLE IF EXISTS ${tbNameA}"
-    sql "DROP TABLE IF EXISTS ${tbNameB}"
-    sql """
-        CREATE TABLE ${tbNameA} (
-            `user_id` LARGEINT NOT NULL COMMENT "user id",
-            `date` DATE NOT NULL COMMENT "data load date and time",
-            `city` VARCHAR(20) COMMENT "user's city",
-            `age` SMALLINT COMMENT "user age",
-            `sex` TINYINT COMMENT "user gender",
-
-            `cost` BIGINT SUM DEFAULT "0" COMMENT "total user spend",
-            `max_dwell_time` INT MAX DEFAULT "0" COMMENT "user's maximum dwell time",
-            `min_dwell_time` INT MIN DEFAULT "99999" COMMENT "user's minimum dwell time",
-            `hll_col` HLL HLL_UNION NOT NULL COMMENT "HLL column",
-            `bitmap_col` Bitmap BITMAP_UNION NOT NULL COMMENT "bitmap column")
-        AGGREGATE KEY(`user_id`, `date`, `city`, `age`, `sex`)
-        PARTITION BY RANGE(`date`)
-        (
-            PARTITION `p201701` VALUES LESS THAN ("2017-02-01"),
-            PARTITION `p201702` VALUES LESS THAN ("2017-03-01"),
-            PARTITION `p201703` VALUES LESS THAN ("2017-04-01")
-        )
-        DISTRIBUTED BY HASH(`user_id`)
-        BUCKETS 1
-        PROPERTIES ( "replication_num" = "1");
-    """
-
-    sql """
-        CREATE TABLE ${tbNameB} (
-            `user_id` LARGEINT NOT NULL COMMENT "user id",
-            `date` DATE NOT NULL COMMENT "data load date and time",
-            `city` VARCHAR(20) COMMENT "user's city",
-            `age` SMALLINT COMMENT "user age",
-            `sex` TINYINT COMMENT "user gender",
-
-            `cost` BIGINT SUM DEFAULT "0" COMMENT "total user spend",
-            `max_dwell_time` INT MAX DEFAULT "0" COMMENT "user's maximum dwell time",
-            `min_dwell_time` INT MIN DEFAULT "99999" COMMENT "user's minimum dwell time",
-            `hll_col` HLL HLL_UNION NOT NULL COMMENT "HLL column",
-            `bitmap_col` Bitmap BITMAP_UNION NOT NULL COMMENT "bitmap column")
-        AGGREGATE KEY(`user_id`, `date`, `city`, `age`, `sex`)
-        PARTITION BY RANGE(`date`)
-        (
-            PARTITION `p201701` VALUES LESS THAN ("2017-02-01"),
-            PARTITION `p201702` VALUES LESS THAN ("2017-03-01"),
-            PARTITION `p201703` VALUES LESS THAN ("2017-04-01")
-        )
-        DISTRIBUTED BY HASH(`user_id`)
-        BUCKETS 1
-        PROPERTIES ( "replication_num" = "1");
-    """
-
-    sql """ INSERT INTO ${tbNameA} VALUES
-        (1, '2017-01-01', 'Beijing', 10, 1, 1, 30, 20, hll_hash(1), to_bitmap(1)),
-        (2, '2017-02-01', 'Beijing', 10, 1, 1, 31, 19, hll_hash(2), to_bitmap(2)),
-        (3, '2017-03-01', 'Beijing', 10, 1, 1, 31, 21, hll_hash(2), to_bitmap(2))
-    """
-
-    sql """ INSERT INTO ${tbNameB} VALUES
-        (4, '2017-01-01', 'Beijing', 10, 1, 1, 30, 20, hll_hash(1), to_bitmap(1)),
-        (5, '2017-02-01', 'Beijing', 10, 1, 1, 31, 19, hll_hash(2), to_bitmap(2)),
-        (6, '2017-03-01', 'Beijing', 10, 1, 1, 31, 21, hll_hash(2), to_bitmap(2))
-    """
-
-    qt_select """ select * from ${tbNameA} order by user_id"""
-    qt_select """ select * from ${tbNameB} order by user_id"""
-
-    sql """ALTER TABLE ${tbNameA} REPLACE WITH TABLE ${tbNameB} PROPERTIES('swap' = 'true');"""
-
-    qt_select """ select * from ${tbNameA} order by user_id"""
-    qt_select """ select * from ${tbNameB} order by user_id"""
-
-    sql """ALTER TABLE ${tbNameA} REPLACE WITH TABLE ${tbNameB} PROPERTIES('swap' = 'false');"""
-    qt_select """ select * from ${tbNameA} order by user_id"""
-    test {
-        sql """ select * from ${tbNameB} order by user_id"""
-        // check exception message contains
-        exception "Unknown table '${tbNameB}'"
table '${tbNameB}'" - } - - sql "DROP TABLE IF EXISTS ${tbNameA} FORCE;" - sql "DROP TABLE IF EXISTS ${tbNameB} FORCE;" -}