diff --git a/docs/.vuepress/sidebar/zh-CN.js b/docs/.vuepress/sidebar/zh-CN.js index f6fa16d4634a0b..26a3b61e4b687b 100644 --- a/docs/.vuepress/sidebar/zh-CN.js +++ b/docs/.vuepress/sidebar/zh-CN.js @@ -53,6 +53,7 @@ module.exports = [ "stream-load-manual", "routine-load-manual", "insert-into-manual", + "spark-load-manual", "delete-manual", ], sidebarDepth: 2, @@ -116,6 +117,7 @@ module.exports = [ "export-manual", "outfile", "privilege", + "resource-management", "segment-v2-usage", "small-file-mgr", "sql-mode", @@ -355,6 +357,7 @@ module.exports = [ "CREATE INDEX", "CREATE MATERIALIZED VIEW", "CREATE REPOSITORY", + "CREATE RESOURCE", "CREATE TABLE", "CREATE VIEW", "create-function", @@ -362,6 +365,7 @@ module.exports = [ "DROP INDEX", "DROP MATERIALIZED VIEW", "DROP REPOSITORY", + "DROP RESOURCE", "DROP TABLE", "DROP VIEW", "drop-function", @@ -369,6 +373,7 @@ module.exports = [ "RECOVER", "RESTORE", "show-functions", + "SHOW RESOURCES", "TRUNCATE TABLE", ], }, diff --git a/docs/zh-CN/administrator-guide/load-data/spark-load-manual.md b/docs/zh-CN/administrator-guide/load-data/spark-load-manual.md new file mode 100644 index 00000000000000..1efc8455720955 --- /dev/null +++ b/docs/zh-CN/administrator-guide/load-data/spark-load-manual.md @@ -0,0 +1,404 @@ +--- +{ + "title": "Spark Load", + "language": "zh-CN" +} +--- + + + +# Spark Load + +Spark load 通过 Spark 实现对导入数据的预处理,提高 Doris 大数据量的导入性能并且节省 Doris 集群的计算资源。主要用于初次迁移,大数据量导入 Doris 的场景。 + +Spark load 是一种异步导入方式,用户需要通过 MySQL 协议创建 Spark 类型导入任务,并通过 `SHOW LOAD` 查看导入结果。 + + + +## 适用场景 + +* 源数据在 Spark 可以访问的存储系统中,如 HDFS。 +* 数据量在 几十 GB 到 TB 级别。 + + + +## 名词解释 + +1. Frontend(FE):Doris 系统的元数据和调度节点。在导入流程中主要负责导入任务的调度工作。 +2. Backend(BE):Doris 系统的计算和存储节点。在导入流程中主要负责数据写入及存储。 +3. Spark ETL:在导入流程中主要负责数据的 ETL 工作,包括全局字典构建(BITMAP类型)、分区、排序、聚合等。 +4. Broker:Broker 为一个独立的无状态进程。封装了文件系统接口,提供 Doris 读取远端存储系统中文件的能力。 + + +## 基本原理 + +### 基本流程 + +用户通过 MySQL 客户端提交 Spark 类型导入任务,FE记录元数据并返回用户提交成功。 + +Spark load 任务的执行主要分为以下5个阶段。 + +1. FE 调度提交 ETL 任务到 Spark 集群执行。 +2. Spark 集群执行 ETL 完成对导入数据的预处理。包括全局字典构建(BITMAP类型)、分区、排序、聚合等。 +3. ETL 任务完成后,FE 获取预处理过的每个分片的数据路径,并调度相关的 BE 执行 Push 任务。 +4. BE 通过 Broker 读取数据,转化为 Doris 底层存储格式。 +5. FE 调度生效版本,完成导入任务。 + +``` + + + | 0. User create spark load job + +----v----+ + | FE |---------------------------------+ + +----+----+ | + | 3. FE send push tasks | + | 5. FE publish version | + +------------+------------+ | + | | | | ++---v---+ +---v---+ +---v---+ | +| BE | | BE | | BE | |1. FE submit Spark ETL job ++---^---+ +---^---+ +---^---+ | + |4. 
BE push with broker | | ++---+---+ +---+---+ +---+---+ | +|Broker | |Broker | |Broker | | ++---^---+ +---^---+ +---^---+ | + | | | | ++---+------------+------------+---+ 2.ETL +-------------v---------------+ +| HDFS +-------> Spark cluster | +| <-------+ | ++---------------------------------+ +-----------------------------+ + +``` + + + +### 全局字典 + +待补 + + + +### 数据预处理(DPP) + +待补 + + + +## 基本操作 + +### 配置 ETL 集群 + +Spark作为一种外部计算资源在Doris中用来完成ETL工作,未来可能还有其他的外部资源会加入到Doris中使用,如Spark/GPU用于查询,HDFS/S3用于外部存储,MapReduce用于ETL等,因此我们引入resource management来管理Doris使用的这些外部资源。 + +提交 Spark 导入任务之前,需要配置执行 ETL 任务的 Spark 集群。 + +语法: + +```sql +-- create spark resource +CREATE EXTERNAL RESOURCE resource_name +PROPERTIES +( + type = spark, + spark_conf_key = spark_conf_value, + working_dir = path, + broker = broker_name, + broker.property_key = property_value +) + +-- drop spark resource +DROP RESOURCE resource_name + +-- show resources +SHOW RESOURCES +SHOW PROC "/resources" + +-- privileges +GRANT USAGE_PRIV ON RESOURCE resource_name TO user_identity +GRANT USAGE_PRIV ON RESOURCE resource_name TO ROLE role_name + +REVOKE USAGE_PRIV ON RESOURCE resource_name FROM user_identity +REVOKE USAGE_PRIV ON RESOURCE resource_name FROM ROLE role_name +``` + +#### 创建资源 + +`resource_name` 为 Doris 中配置的 Spark 资源的名字。 + +`PROPERTIES` 是 Spark 资源相关参数,如下: + +- `type`:资源类型,必填,目前仅支持 spark。 + +- Spark 相关参数如下: + - `spark.master`: 必填,目前支持yarn,spark://host:port。 + - `spark.submit.deployMode`: Spark 程序的部署模式,必填,支持 cluster,client 两种。 + - `spark.hadoop.yarn.resourcemanager.address`: master为yarn时必填。 + - `spark.hadoop.fs.defaultFS`: master为yarn时必填。 + - 其他参数为可选,参考http://spark.apache.org/docs/latest/configuration.html +- `working_dir`: ETL 使用的目录。spark作为ETL资源使用时必填。例如:hdfs://host:port/tmp/doris。 +- `broker`: broker 名字。spark作为ETL资源使用时必填。需要使用`ALTER SYSTEM ADD BROKER` 命令提前完成配置。 + - `broker.property_key`: broker读取ETL生成的中间文件时需要指定的认证信息等。 + +示例: + +```sql +-- yarn cluster 模式 +CREATE EXTERNAL RESOURCE "spark0" +PROPERTIES +( + "type" = "spark", + "spark.master" = "yarn", + "spark.submit.deployMode" = "cluster", + "spark.jars" = "xxx.jar,yyy.jar", + "spark.files" = "/tmp/aaa,/tmp/bbb", + "spark.executor.memory" = "1g", + "spark.yarn.queue" = "queue0", + "spark.hadoop.yarn.resourcemanager.address" = "127.0.0.1:9999", + "spark.hadoop.fs.defaultFS" = "hdfs://127.0.0.1:10000", + "working_dir" = "hdfs://127.0.0.1:10000/tmp/doris", + "broker" = "broker0", + "broker.username" = "user0", + "broker.password" = "password0" +); + +-- spark standalone client 模式 +CREATE EXTERNAL RESOURCE "spark1" +PROPERTIES +( + "type" = "spark", + "spark.master" = "spark://127.0.0.1:7777", + "spark.submit.deployMode" = "client", + "working_dir" = "hdfs://127.0.0.1:10000/tmp/doris", + "broker" = "broker1" +); +``` + +#### 查看资源 + +普通账户只能看到自己有USAGE_PRIV使用权限的资源。 + +root和admin账户可以看到所有的资源。 + +#### 资源权限 + +资源权限通过GRANT REVOKE来管理,目前仅支持USAGE_PRIV使用权限。 + +可以将USAGE_PRIV权限赋予某个用户或者某个角色,角色的使用与之前一致。 +```sql +-- 授予spark0资源的使用权限给用户user0 +GRANT USAGE_PRIV ON RESOURCE "spark0" TO "user0"@"%"; + +-- 授予spark0资源的使用权限给角色role0 +GRANT USAGE_PRIV ON RESOURCE "spark0" TO ROLE "role0"; + +-- 授予所有资源的使用权限给用户user0 +GRANT USAGE_PRIV ON RESOURCE * TO "user0"@"%"; + +-- 授予所有资源的使用权限给角色role0 +GRANT USAGE_PRIV ON RESOURCE * TO ROLE "role0"; + +-- 撤销用户user0的spark0资源使用权限 +REVOKE USAGE_PRIV ON RESOURCE "spark0" FROM "user0"@"%"; +``` + + + +### 创建导入 + +语法: + +```sql +LOAD LABEL load_label + (data_desc, ...) + WITH RESOURCE resource_name resource_properties + [PROPERTIES (key1=value1, ... 
)] + +* load_label: + db_name.label_name + +* data_desc: + DATA INFILE ('file_path', ...) + [NEGATIVE] + INTO TABLE tbl_name + [PARTITION (p1, p2)] + [COLUMNS TERMINATED BY separator ] + [(col1, ...)] + [SET (k1=f1(xx), k2=f2(xx))] + [WHERE predicate] + +* resource_properties: + (key2=value2, ...) +``` +示例: + +```sql +LOAD LABEL db1.label1 +( + DATA INFILE("hdfs://abc.com:8888/user/palo/test/ml/file1") + INTO TABLE tbl1 + COLUMNS TERMINATED BY "," + (tmp_c1,tmp_c2) + SET + ( + id=tmp_c2, + name=tmp_c1 + ), + DATA INFILE("hdfs://abc.com:8888/user/palo/test/ml/file2") + INTO TABLE tbl2 + COLUMNS TERMINATED BY "," + (col1, col2) + where col1 > 1 +) +WITH RESOURCE 'spark0' +( + "spark.executor.memory" = "2g", + "spark.shuffle.compress" = "true" +) +PROPERTIES +( + "timeout" = "3600" +); + +``` + +创建导入的详细语法执行 ```HELP SPARK LOAD``` 查看语法帮助。这里主要介绍 Spark load 的创建导入语法中参数意义和注意事项。 + +#### Label + +导入任务的标识。每个导入任务,都有一个在单 database 内部唯一的 Label。具体规则与 `Broker Load` 一致。 + +#### 数据描述类参数 + +目前支持的数据源有CSV和hive table。其他规则与 `Broker Load` 一致。 + +#### 导入作业参数 + +导入作业参数主要指的是 Spark load 创建导入语句中的属于 ```opt_properties```部分的参数。导入作业参数是作用于整个导入作业的。规则与 `Broker Load` 一致。 + +#### Spark资源参数 + +Spark资源需要提前配置到 Doris系统中并且赋予用户USAGE_PRIV权限后才能使用 Spark load。 + +当用户有临时性的需求,比如增加任务使用的资源而修改 Spark configs,可以在这里设置,设置仅对本次任务生效,并不影响 Doris 集群中已有的配置。 + +```sql +WITH RESOURCE 'spark0' +( + "spark.driver.memory" = "1g", + "spark.executor.memory" = "3g" +) +``` + + + +### 查看导入 + +Spark load 导入方式同 Broker load 一样都是异步的,所以用户必须将创建导入的 Label 记录,并且在**查看导入命令中使用 Label 来查看导入结果**。查看导入命令在所有导入方式中是通用的,具体语法可执行 ```HELP SHOW LOAD``` 查看。 + +示例: + +``` +mysql> show load order by createtime desc limit 1\G +*************************** 1. row *************************** + JobId: 76391 + Label: label1 + State: FINISHED + Progress: ETL:100%; LOAD:100% + Type: SPARK + EtlInfo: unselected.rows=4; dpp.abnorm.ALL=15; dpp.norm.ALL=28133376 + TaskInfo: cluster:cluster0; timeout(s):10800; max_filter_ratio:5.0E-5 + ErrorMsg: N/A + CreateTime: 2019-07-27 11:46:42 + EtlStartTime: 2019-07-27 11:46:44 + EtlFinishTime: 2019-07-27 11:49:44 + LoadStartTime: 2019-07-27 11:49:44 +LoadFinishTime: 2019-07-27 11:50:16 + URL: http://1.1.1.1:8089/proxy/application_1586619723848_0035/ + JobDetails: {"ScannedRows":28133395,"TaskNumber":1,"FileNumber":1,"FileSize":200000} +``` + +返回结果集中参数意义可以参考 Broker load。不同点如下: + ++ State + + 导入任务当前所处的阶段。任务提交之后状态为 PENDING,提交 Spark ETL 之后状态变为 ETL,ETL 完成之后 FE 调度 BE 执行 push 操作状态变为 LOADING,push 完成并且版本生效后状态变为 FINISHED。 + + 导入任务的最终阶段有两个:CANCELLED 和 FINISHED,当 Load job 处于这两个阶段时导入完成。其中 CANCELLED 为导入失败,FINISHED 为导入成功。 + ++ Progress + + 导入任务的进度描述。分为两种进度:ETL 和 LOAD,对应了导入流程的两个阶段 ETL 和 LOADING。 + + LOAD 的进度范围为:0~100%。 + + ```LOAD 进度 = 当前已完成所有replica导入的tablet个数 / 本次导入任务的总tablet个数 * 100%``` + + **如果所有导入表均完成导入,此时 LOAD 的进度为 99%** 导入进入到最后生效阶段,整个导入完成后,LOAD 的进度才会改为 100%。 + + 导入进度并不是线性的。所以如果一段时间内进度没有变化,并不代表导入没有在执行。 + ++ Type + + 导入任务的类型。Spark load 为 SPARK。 + ++ CreateTime/EtlStartTime/EtlFinishTime/LoadStartTime/LoadFinishTime + + 这几个值分别代表导入创建的时间,ETL 阶段开始的时间,ETL 阶段完成的时间,LOADING 阶段开始的时间和整个导入任务完成的时间。 + ++ JobDetails + + 显示一些作业的详细运行状态,ETL 结束的时候更新。包括导入文件的个数、总大小(字节)、子任务个数、已处理的原始行数等。 + + ```{"ScannedRows":139264,"TaskNumber":1,"FileNumber":1,"FileSize":940754064}``` + +### 取消导入 + +当 Spark load 作业状态不为 CANCELLED 或 FINISHED 时,可以被用户手动取消。取消时需要指定待取消导入任务的 Label 。取消导入命令语法可执行 ```HELP CANCEL LOAD```查看。 + + + +## 相关系统配置 + +### FE 配置 + +下面配置属于 Spark load 的系统级别配置,也就是作用于所有 Spark load 导入任务的配置。主要通过修改 ``` fe.conf```来调整配置值。 + ++ spark_load_default_timeout_second + + 任务默认超时时间为259200秒(3天)。 + + + +## 最佳实践 + 
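在进入具体建议之前,先通过一个端到端的流程示意串联前文介绍的各个步骤(其中 `spark0`、`broker0`、`db1.tbl1` 等名称均为示例,需替换为实际环境中的资源、Broker 和库表名):

```sql
-- 1. 管理员创建 Spark 资源(yarn cluster 模式)
CREATE EXTERNAL RESOURCE "spark0"
PROPERTIES
(
    "type" = "spark",
    "spark.master" = "yarn",
    "spark.submit.deployMode" = "cluster",
    "spark.hadoop.yarn.resourcemanager.address" = "127.0.0.1:9999",
    "spark.hadoop.fs.defaultFS" = "hdfs://127.0.0.1:10000",
    "working_dir" = "hdfs://127.0.0.1:10000/tmp/doris",
    "broker" = "broker0"
);

-- 2. 将资源使用权限授予提交导入任务的用户
GRANT USAGE_PRIV ON RESOURCE "spark0" TO "user0"@"%";

-- 3. 以 user0 身份提交 Spark load 导入任务
LOAD LABEL db1.label1
(
    DATA INFILE("hdfs://127.0.0.1:10000/user/doris/file1")
    INTO TABLE tbl1
    COLUMNS TERMINATED BY ","
)
WITH RESOURCE "spark0"
("spark.executor.memory" = "2g")
PROPERTIES ("timeout" = "3600");

-- 4. 通过 Label 异步查看导入结果
SHOW LOAD WHERE LABEL = "label1";
```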
+### 应用场景 + +使用 Spark load 最适合的场景就是原始数据在文件系统(HDFS)中,数据量在 几十 GB 到 TB 级别。小数据量还是建议使用 Stream load 或者 Broker load。 + + + +## 常见问题 + +* 使用Spark load时需要在FE机器设置SPARK_HOME和HADOOP_CONF_DIR环境变量。 + +提交Spark job时用到spark-submit命令,如果SPARK_HOME环境变量没有设置,会报 `Spark home not found; set it explicitly or use the SPARK_HOME environment variable` 错误。 + +如果HADOOP_CONF_DIR环境变量没有设置,会报 `When running with master 'yarn' either HADOOP_CONF_DIR or YARN_CONF_DIR must be set in the environment.` 错误。 + + diff --git a/docs/zh-CN/administrator-guide/privilege.md b/docs/zh-CN/administrator-guide/privilege.md index 407e64189cfd53..1e59b2999c7c32 100644 --- a/docs/zh-CN/administrator-guide/privilege.md +++ b/docs/zh-CN/administrator-guide/privilege.md @@ -97,14 +97,24 @@ Doris 目前支持以下几种权限 删除数据库、表、视图的权限。 +8. Usage_priv + + 资源的使用权限。 + + ## 权限层级 -同时,根据权限适用范围的不同,我们将权限分为以下三个层级: +同时,根据权限适用范围的不同,我们将库表的权限分为以下三个层级: 1. GLOBAL LEVEL:全局权限。即通过 GRANT 语句授予的 `*.*` 上的权限。被授予的权限适用于任意数据库中的任意表。 2. DATABASE LEVEL:数据库级权限。即通过 GRANT 语句授予的 `db.*` 上的权限。被授予的权限适用于指定数据库中的任意表。 3. TABLE LEVEL:表级权限。即通过 GRANT 语句授予的 `db.tbl` 上的权限。被授予的权限适用于指定数据库中的指定表。 +将资源的权限分为以下两个层级: + +1. GLOBAL LEVEL:全局权限。即通过 GRANT 语句授予的 `*` 上的权限。被授予的权限适用于资源。 +2. RESOURCE LEVEL: 资源级权限。即通过 GRANT 语句授予的 `resource_name` 上的权限。被授予的权限适用于指定资源。 + ## ADMIN/GRANT 权限说明 diff --git a/docs/zh-CN/administrator-guide/resource-management.md b/docs/zh-CN/administrator-guide/resource-management.md new file mode 100644 index 00000000000000..766e105fc68fff --- /dev/null +++ b/docs/zh-CN/administrator-guide/resource-management.md @@ -0,0 +1,125 @@ +--- +{ + "title": "资源管理", + "language": "zh-CN" +} +--- + + + +# 资源管理 + +为了节省Doris集群内的计算、存储资源,Doris需要引入一些其他外部资源来完成相关的工作,如Spark/GPU用于查询,HDFS/S3用于外部存储,Spark/MapReduce用于ETL等,因此我们引入资源管理机制来管理Doris使用的这些外部资源。 + + + +## 基本概念 + +一个资源包含名字、类型等基本信息,名字为全局唯一,不同类型的资源包含不同的属性,具体参考各资源的介绍。 + +资源的创建和删除只能由拥有 `admin` 权限的用户进行操作。一个资源隶属于整个Doris集群。拥有 `admin` 权限的用户可以将使用权限`usage_priv` 赋给普通用户。可参考`HELP GRANT`或者权限文档。 + + + +## 具体操作 + +资源管理主要有三个命令:`CREATE RESOURCE`,`DROP RESOURCE` 和 `SHOW RESOURCES`,分别为创建、删除和查看资源。这三个命令的具体语法可以通过MySQL客户端连接到 Doris 后,执行 `HELP cmd` 的方式查看帮助。 + +1. CREATE RESOURCE + + 语法 + + ```sql + CREATE [EXTERNAL] RESOURCE "resource_name" + PROPERTIES ("key"="value", ...); + ``` + + 在创建资源的命令中,用户必须提供以下信息: + + * `resource_name` 为 Doris 中配置的资源的名字。 + * `PROPERTIES` 是资源相关参数,如下: + * `type`:资源类型,必填,目前仅支持 spark。 + * 其他参数见各资源介绍。 + +2. DROP RESOURCE + + 该命令可以删除一个已存在的资源。具体操作见:`HELP DROP RESOURCE` + +3. 
SHOW RESOURCES + + 该命令可以查看用户有使用权限的资源。具体操作见:`HELP SHOW RESOURCES` + + + +## 支持的资源 + +目前仅支持Spark资源,完成ETL工作。下面的示例都以Spark资源为例。 + +### Spark + +#### 参数 + +##### Spark 相关参数如下: + +`spark.master`: 必填,目前支持yarn,spark://host:port。 + +`spark.submit.deployMode`: Spark 程序的部署模式,必填,支持 cluster,client 两种。 + +`spark.hadoop.yarn.resourcemanager.address`: master为yarn时必填。 + +`spark.hadoop.fs.defaultFS`: master为yarn时必填。 + +其他参数为可选,参考http://spark.apache.org/docs/latest/configuration.html。 + + + +##### 如果Spark用于ETL,还需要指定以下参数: + +`working_dir`: ETL 使用的目录。spark作为ETL资源使用时必填。例如:hdfs://host:port/tmp/doris。 + +`broker`: broker 名字。spark作为ETL资源使用时必填。需要使用`ALTER SYSTEM ADD BROKER` 命令提前完成配置。 + + * `broker.property_key`: broker读取ETL生成的中间文件时需要指定的认证信息等。 + + + +#### 示例 + +创建 yarn cluster 模式,名为 spark0 的 Spark 资源。 + +```sql +CREATE EXTERNAL RESOURCE "spark0" +PROPERTIES +( + "type" = "spark", + "spark.master" = "yarn", + "spark.submit.deployMode" = "cluster", + "spark.jars" = "xxx.jar,yyy.jar", + "spark.files" = "/tmp/aaa,/tmp/bbb", + "spark.executor.memory" = "1g", + "spark.yarn.queue" = "queue0", + "spark.hadoop.yarn.resourcemanager.address" = "127.0.0.1:9999", + "spark.hadoop.fs.defaultFS" = "hdfs://127.0.0.1:10000", + "working_dir" = "hdfs://127.0.0.1:10000/tmp/doris", + "broker" = "broker0", + "broker.username" = "user0", + "broker.password" = "password0" +); +``` \ No newline at end of file diff --git a/docs/zh-CN/sql-reference/sql-statements/Account Management/GRANT.md b/docs/zh-CN/sql-reference/sql-statements/Account Management/GRANT.md index ef460589ef0242..75965541223754 100644 --- a/docs/zh-CN/sql-reference/sql-statements/Account Management/GRANT.md +++ b/docs/zh-CN/sql-reference/sql-statements/Account Management/GRANT.md @@ -33,6 +33,8 @@ Syntax: GRANT privilege_list ON db_name[.tbl_name] TO user_identity [ROLE role_name] + GRANT privilege_list ON RESOURCE resource_name TO user_identity [ROLE role_name] + privilege_list 是需要赋予的权限列表,以逗号分隔。当前 Doris 支持如下权限: @@ -44,10 +46,17 @@ privilege_list 是需要赋予的权限列表,以逗号分隔。当前 Doris ALTER_PRIV:对指定的库或表的schema变更权限 CREATE_PRIV:对指定的库或表的创建权限 DROP_PRIV:对指定的库或表的删除权限 + USAGE_PRIV: 对指定资源的使用权限 旧版权限中的 ALL 和 READ_WRITE 会被转换成:SELECT_PRIV,LOAD_PRIV,ALTER_PRIV,CREATE_PRIV,DROP_PRIV; READ_ONLY 会被转换为 SELECT_PRIV。 +权限分类: + + 1. 节点权限:NODE_PRIV + 2. 库表权限:SELECT_PRIV,LOAD_PRIV,ALTER_PRIV,CREATE_PRIV,DROP_PRIV + 3. 资源权限:USAGE_PRIV + db_name[.tbl_name] 支持以下三种形式: 1. *.* 权限可以应用于所有库及其中所有表 @@ -56,6 +65,13 @@ db_name[.tbl_name] 支持以下三种形式: 这里指定的库或表可以是不存在的库和表。 +resource_name 支持以下两种形式: + + 1. * 权限应用于所有资源 + 2. resource 权限应用于指定资源 + + 这里指定的资源可以是不存在的资源。 + user_identity: 这里的 user_identity 语法同 CREATE USER。且必须为使用 CREATE USER 创建过的 user_identity。user_identity 中的host可以是域名,如果是域名的话,权限的生效时间可能会有1分钟左右的延迟。 @@ -76,6 +92,18 @@ user_identity: GRANT LOAD_PRIV ON db1.* TO ROLE 'my_role'; + 4. 授予所有资源的使用权限给用户 + + GRANT USAGE_PRIV ON RESOURCE * TO 'jack'@'%'; + + 5. 授予指定资源的使用权限给用户 + + GRANT USAGE_PRIV ON RESOURCE 'spark_resource' TO 'jack'@'%'; + + 6. 授予指定资源的使用权限给角色 + + GRANT USAGE_PRIV ON RESOURCE 'spark_resource' TO ROLE 'my_role'; + ## keyword GRANT diff --git a/docs/zh-CN/sql-reference/sql-statements/Account Management/REVOKE.md b/docs/zh-CN/sql-reference/sql-statements/Account Management/REVOKE.md index d016b3b5107dc4..4a8168ee7be150 100644 --- a/docs/zh-CN/sql-reference/sql-statements/Account Management/REVOKE.md +++ b/docs/zh-CN/sql-reference/sql-statements/Account Management/REVOKE.md @@ -30,6 +30,8 @@ under the License. 
REVOKE 命令用于撤销指定用户或角色指定的权限。 Syntax: REVOKE privilege_list ON db_name[.tbl_name] FROM user_identity [ROLE role_name] + + REVOKE privilege_list ON RESOURCE resource_name FROM user_identity [ROLE role_name] user_identity: @@ -43,6 +45,10 @@ under the License. REVOKE SELECT_PRIV ON db1.* FROM 'jack'@'192.%'; + 1. 撤销用户 jack 资源 spark_resource 的使用权限 + + REVOKE USAGE_PRIV ON RESOURCE 'spark_resource' FROM 'jack'@'192.%'; + ## keyword REVOKE diff --git a/docs/zh-CN/sql-reference/sql-statements/Data Definition/CREATE RESOURCE.md b/docs/zh-CN/sql-reference/sql-statements/Data Definition/CREATE RESOURCE.md new file mode 100644 index 00000000000000..58828c5d4b9e44 --- /dev/null +++ b/docs/zh-CN/sql-reference/sql-statements/Data Definition/CREATE RESOURCE.md @@ -0,0 +1,66 @@ + + +# CREATE RESOURCE +## description + 该语句用于创建资源。仅 root 或 admin 用户可以创建资源。目前仅支持 Spark 外部资源。将来其他外部资源可能会加入到 Doris 中使用,如 Spark/GPU 用于查询,HDFS/S3 用于外部存储,MapReduce 用于 ETL 等。 + 语法: + CREATE [EXTERNAL] RESOURCE "resource_name" + PROPERTIES ("key"="value", ...); + + 说明: + 1. PROPERTIES中需要指定资源的类型 "type" = "spark",目前仅支持 spark。 + 2. 根据资源类型的不同 PROPERTIES 有所不同,具体见示例。 + +## example + 1. 创建yarn cluster 模式,名为 spark0 的 Spark 资源。 + CREATE EXTERNAL RESOURCE "spark0" + PROPERTIES + ( + "type" = "spark", + "spark.master" = "yarn", + "spark.submit.deployMode" = "cluster", + "spark.jars" = "xxx.jar,yyy.jar", + "spark.files" = "/tmp/aaa,/tmp/bbb", + "spark.executor.memory" = "1g", + "spark.yarn.queue" = "queue0", + "spark.hadoop.yarn.resourcemanager.address" = "127.0.0.1:9999", + "spark.hadoop.fs.defaultFS" = "hdfs://127.0.0.1:10000", + "working_dir" = "hdfs://127.0.0.1:10000/tmp/doris", + "broker" = "broker0", + "broker.username" = "user0", + "broker.password" = "password0" + ); + + + Spark 相关参数如下: + 1. spark.master: 必填,目前支持yarn,spark://host:port。 + 2. spark.submit.deployMode: Spark 程序的部署模式,必填,支持 cluster,client 两种。 + 3. spark.hadoop.yarn.resourcemanager.address: master为yarn时必填。 + 4. spark.hadoop.fs.defaultFS: master为yarn时必填。 + 5. 其他参数为可选,参考http://spark.apache.org/docs/latest/configuration.html + + Spark 用于 ETL 时需要指定 working_dir 和 broker。说明如下: + working_dir: ETL 使用的目录。spark作为ETL资源使用时必填。例如:hdfs://host:port/tmp/doris。 + broker: broker 名字。spark作为ETL资源使用时必填。需要使用`ALTER SYSTEM ADD BROKER` 命令提前完成配置。 + broker.property_key: broker读取ETL生成的中间文件时需要指定的认证信息等。 + +## keyword + CREATE RESOURCE + diff --git a/docs/zh-CN/sql-reference/sql-statements/Data Definition/DROP RESOURCE.md b/docs/zh-CN/sql-reference/sql-statements/Data Definition/DROP RESOURCE.md new file mode 100644 index 00000000000000..79915da1253a60 --- /dev/null +++ b/docs/zh-CN/sql-reference/sql-statements/Data Definition/DROP RESOURCE.md @@ -0,0 +1,32 @@ + + +# DROP RESOURCE +## description + 该语句用于删除一个已有的资源。仅 root 或 admin 用户可以删除资源。 + 语法: + DROP RESOURCE 'resource_name' + +## example + 1. 
删除名为 spark0 的 Spark 资源: + DROP RESOURCE 'spark0'; + +## keyword + DROP RESOURCE + diff --git a/docs/zh-CN/sql-reference/sql-statements/Data Definition/SHOW RESOURCES.md b/docs/zh-CN/sql-reference/sql-statements/Data Definition/SHOW RESOURCES.md new file mode 100644 index 00000000000000..61e0fb9a6a13b3 --- /dev/null +++ b/docs/zh-CN/sql-reference/sql-statements/Data Definition/SHOW RESOURCES.md @@ -0,0 +1,31 @@ + + +# SHOW RESOURCES +## description + + 该语句用于展示用户有使用权限的资源。普通用户仅能展示有使用权限的资源,root 或 admin 用户会展示所有的资源。 + + 语法 + + SHOW RESOURCES + + +## keyword + SHOW RESOURCES \ No newline at end of file diff --git a/fe/src/main/cup/sql_parser.cup b/fe/src/main/cup/sql_parser.cup index 17806d0312d379..e224d156b0853d 100644 --- a/fe/src/main/cup/sql_parser.cup +++ b/fe/src/main/cup/sql_parser.cup @@ -253,7 +253,7 @@ terminal String KW_ADD, KW_ADMIN, KW_AFTER, KW_AGGREGATE, KW_ALL, KW_ALTER, KW_A KW_PROC, KW_PROCEDURE, KW_PROCESSLIST, KW_PROPERTIES, KW_PROPERTY, KW_QUERY, KW_QUOTA, KW_RANDOM, KW_RANGE, KW_READ, KW_RECOVER, KW_REGEXP, KW_RELEASE, KW_RENAME, - KW_REPAIR, KW_REPEATABLE, KW_REPOSITORY, KW_REPOSITORIES, KW_REPLACE, KW_REPLACE_IF_NOT_NULL, KW_REPLICA, KW_RESOURCE, KW_RESTORE, KW_RETURNS, KW_RESUME, KW_REVOKE, + KW_REPAIR, KW_REPEATABLE, KW_REPOSITORY, KW_REPOSITORIES, KW_REPLACE, KW_REPLACE_IF_NOT_NULL, KW_REPLICA, KW_RESOURCE, KW_RESOURCES, KW_RESTORE, KW_RETURNS, KW_RESUME, KW_REVOKE, KW_RIGHT, KW_ROLE, KW_ROLES, KW_ROLLBACK, KW_ROLLUP, KW_ROUTINE, KW_ROW, KW_ROWS, KW_SCHEMA, KW_SCHEMAS, KW_SECOND, KW_SELECT, KW_SEMI, KW_SERIALIZABLE, KW_SESSION, KW_SET, KW_SETS, KW_SHOW, KW_SMALLINT, KW_SNAPSHOT, KW_SONAME, KW_SPLIT, KW_START, KW_STATUS, KW_STOP, KW_STORAGE, KW_STRING, @@ -432,6 +432,7 @@ nonterminal List opt_col_mapping_list; nonterminal ColumnSeparator opt_field_term, column_separator; nonterminal String opt_user_role; nonterminal TablePattern tbl_pattern; +nonterminal ResourcePattern resource_pattern; nonterminal String ident_or_star; // Routine load @@ -971,7 +972,7 @@ alter_system_clause ::= | KW_SET KW_LOAD KW_ERRORS KW_HUB opt_properties:properties {: RESULT = new AlterLoadErrorUrlClause(properties); - :} + :} ; alter_cluster_clause ::= @@ -1085,6 +1086,11 @@ create_stmt ::= {: RESULT = new AlterTableStmt(tableName, Lists.newArrayList(new CreateIndexClause(tableName, new IndexDef(indexName, cols, indexType, comment), false))); :} + /* resource */ + | KW_CREATE opt_external:isExternal KW_RESOURCE ident_or_text:resourceName opt_properties:properties + {: + RESULT = new CreateResourceStmt(isExternal, resourceName, properties); + :} ; opt_aggregate ::= @@ -1470,6 +1476,14 @@ grant_stmt ::= {: RESULT = new GrantStmt(null, role, tblPattern, privs); :} + | KW_GRANT privilege_list:privs KW_ON KW_RESOURCE resource_pattern:resourcePattern KW_TO user_identity:userId + {: + RESULT = new GrantStmt(userId, null, resourcePattern, privs); + :} + | KW_GRANT privilege_list:privs KW_ON KW_RESOURCE resource_pattern:resourcePattern KW_TO KW_ROLE STRING_LITERAL:role + {: + RESULT = new GrantStmt(null, role, resourcePattern, privs); + :} ; tbl_pattern ::= @@ -1483,6 +1497,17 @@ tbl_pattern ::= :} ; +resource_pattern ::= + ident_or_star:resourceName + {: + RESULT = new ResourcePattern(resourceName); + :} + | STRING_LITERAL:resourceName + {: + RESULT = new ResourcePattern(resourceName); + :} + ; + ident_or_star ::= STAR {: @@ -1504,6 +1529,14 @@ revoke_stmt ::= {: RESULT = new RevokeStmt(null, role, tblPattern, privs); :} + | KW_REVOKE privilege_list:privs KW_ON KW_RESOURCE 
resource_pattern:resourcePattern KW_FROM user_identity:userId + {: + RESULT = new RevokeStmt(userId, null, resourcePattern, privs); + :} + | KW_REVOKE privilege_list:privs KW_ON KW_RESOURCE resource_pattern:resourcePattern KW_FROM KW_ROLE STRING_LITERAL:role + {: + RESULT = new RevokeStmt(null, role, resourcePattern, privs); + :} ; // Drop statement @@ -1562,6 +1595,10 @@ drop_stmt ::= {: RESULT = new DropMaterializedViewStmt(ifExists, mvName, tableName); :} + | KW_DROP KW_RESOURCE ident_or_text:resourceName + {: + RESULT = new DropResourceStmt(resourceName); + :} ; // Recover statement @@ -2176,6 +2213,10 @@ show_param ::= {: RESULT = new ShowBrokerStmt(); :} + | KW_RESOURCES + {: + RESULT = new ShowResourcesStmt(); + :} | KW_BACKENDS {: RESULT = new ShowBackendsStmt(); @@ -4483,6 +4524,8 @@ keyword ::= {: RESULT = id; :} | KW_RESOURCE:id {: RESULT = id; :} + | KW_RESOURCES:id + {: RESULT = id; :} | KW_RESTORE:id {: RESULT = id; :} | KW_RETURNS:id diff --git a/fe/src/main/java/org/apache/doris/analysis/AlterSystemStmt.java b/fe/src/main/java/org/apache/doris/analysis/AlterSystemStmt.java index 690aa505602cb0..8e2175b8a66b8b 100644 --- a/fe/src/main/java/org/apache/doris/analysis/AlterSystemStmt.java +++ b/fe/src/main/java/org/apache/doris/analysis/AlterSystemStmt.java @@ -71,4 +71,3 @@ public String toString() { return toSql(); } } - diff --git a/fe/src/main/java/org/apache/doris/analysis/CreateResourceStmt.java b/fe/src/main/java/org/apache/doris/analysis/CreateResourceStmt.java new file mode 100644 index 00000000000000..02323a805f07fc --- /dev/null +++ b/fe/src/main/java/org/apache/doris/analysis/CreateResourceStmt.java @@ -0,0 +1,103 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.analysis; + +import org.apache.doris.catalog.Catalog; +import org.apache.doris.catalog.Resource.ResourceType; +import org.apache.doris.common.AnalysisException; +import org.apache.doris.common.ErrorCode; +import org.apache.doris.common.ErrorReport; +import org.apache.doris.common.FeNameFormat; +import org.apache.doris.common.UserException; +import org.apache.doris.common.util.PrintableMap; +import org.apache.doris.mysql.privilege.PrivPredicate; +import org.apache.doris.qe.ConnectContext; + +import java.util.Map; + +// CREATE [EXTERNAL] RESOURCE resource_name +// PROPERTIES (key1 = value1, ...) 
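// e.g. (names are illustrative; see the resource management docs):
//   CREATE EXTERNAL RESOURCE "spark0"
//   PROPERTIES ("type" = "spark", "spark.master" = "yarn", ...)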
+public class CreateResourceStmt extends DdlStmt { + private static final String TYPE = "type"; + + private final boolean isExternal; + private final String resourceName; + private final Map properties; + private ResourceType resourceType; + + public CreateResourceStmt(boolean isExternal, String resourceName, Map properties) { + this.isExternal = isExternal; + this.resourceName = resourceName; + this.properties = properties; + this.resourceType = ResourceType.UNKNOWN; + } + + public String getResourceName() { + return resourceName; + } + + public Map getProperties() { + return properties; + } + + public ResourceType getResourceType() { + return resourceType; + } + + @Override + public void analyze(Analyzer analyzer) throws UserException { + super.analyze(analyzer); + + // check auth + if (!Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(), PrivPredicate.ADMIN)) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "ADMIN"); + } + + // check name + FeNameFormat.checkResourceName(resourceName); + + // check type in properties + if (properties == null || properties.isEmpty()) { + throw new AnalysisException("Resource properties can't be null"); + } + String type = properties.get(TYPE); + if (type == null) { + throw new AnalysisException("Resource type can't be null"); + } + resourceType = ResourceType.fromString(type); + if (resourceType == ResourceType.UNKNOWN) { + throw new AnalysisException("Unsupported resource type: " + type); + } + if (resourceType == ResourceType.SPARK && !isExternal) { + throw new AnalysisException("Spark is external resource"); + } + } + + @Override + public String toSql() { + StringBuilder sb = new StringBuilder(); + sb.append("CREATE "); + if (isExternal) { + sb.append("EXTERNAL "); + } + sb.append("RESOURCE '").append(resourceName).append("' "); + sb.append("PROPERTIES(").append(new PrintableMap<>(properties, " = ", true, false)).append(")"); + return sb.toString(); + } +} + diff --git a/fe/src/main/java/org/apache/doris/analysis/DropResourceStmt.java b/fe/src/main/java/org/apache/doris/analysis/DropResourceStmt.java new file mode 100644 index 00000000000000..4b86b0cae422fa --- /dev/null +++ b/fe/src/main/java/org/apache/doris/analysis/DropResourceStmt.java @@ -0,0 +1,59 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.doris.analysis; + +import org.apache.doris.catalog.Catalog; +import org.apache.doris.common.ErrorCode; +import org.apache.doris.common.ErrorReport; +import org.apache.doris.common.FeNameFormat; +import org.apache.doris.common.UserException; +import org.apache.doris.mysql.privilege.PrivPredicate; +import org.apache.doris.qe.ConnectContext; + +// DROP RESOURCE resource_name +public class DropResourceStmt extends DdlStmt { + private String resourceName; + + public DropResourceStmt(String resourceName) { + this.resourceName = resourceName; + } + + public String getResourceName() { + return resourceName; + } + + @Override + public void analyze(Analyzer analyzer) throws UserException { + super.analyze(analyzer); + + // check auth + if (!Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(), PrivPredicate.ADMIN)) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "ADMIN"); + } + + FeNameFormat.checkResourceName(resourceName); + } + + @Override + public String toSql() { + StringBuilder sb = new StringBuilder(); + sb.append("DROP "); + sb.append("RESOURCE `").append(resourceName).append("`"); + return sb.toString(); + } +} diff --git a/fe/src/main/java/org/apache/doris/analysis/GrantStmt.java b/fe/src/main/java/org/apache/doris/analysis/GrantStmt.java index be332c31111287..3d7674a7ca6aa4 100644 --- a/fe/src/main/java/org/apache/doris/analysis/GrantStmt.java +++ b/fe/src/main/java/org/apache/doris/analysis/GrantStmt.java @@ -38,16 +38,34 @@ // GRANT STMT // GRANT privilege [, privilege] ON db.tbl TO user [ROLE 'role']; +// GRANT privilege [, privilege] ON RESOURCE 'resource' TO user [ROLE 'role']; public class GrantStmt extends DdlStmt { private UserIdentity userIdent; private String role; private TablePattern tblPattern; + private ResourcePattern resourcePattern; private List privileges; + // TODO(wyb): spark-load + public static boolean disableGrantResource = true; + public GrantStmt(UserIdentity userIdent, String role, TablePattern tblPattern, List privileges) { this.userIdent = userIdent; this.role = role; this.tblPattern = tblPattern; + this.resourcePattern = null; + PrivBitSet privs = PrivBitSet.of(); + for (AccessPrivilege accessPrivilege : privileges) { + privs.or(accessPrivilege.toPaloPrivilege()); + } + this.privileges = privs.toPrivilegeList(); + } + + public GrantStmt(UserIdentity userIdent, String role, ResourcePattern resourcePattern, List privileges) { + this.userIdent = userIdent; + this.role = role; + this.tblPattern = null; + this.resourcePattern = resourcePattern; PrivBitSet privs = PrivBitSet.of(); for (AccessPrivilege accessPrivilege : privileges) { privs.or(accessPrivilege.toPaloPrivilege()); @@ -63,6 +81,10 @@ public TablePattern getTblPattern() { return tblPattern; } + public ResourcePattern getResourcePattern() { + return resourcePattern; + } + public boolean hasRole() { return !Strings.isNullOrEmpty(role); } @@ -85,13 +107,25 @@ public void analyze(Analyzer analyzer) throws AnalysisException, UserException { role = ClusterNamespace.getFullName(analyzer.getClusterName(), role); } - tblPattern.analyze(analyzer.getClusterName()); + if (tblPattern != null) { + tblPattern.analyze(analyzer.getClusterName()); + } else { + // TODO(wyb): spark-load + if (disableGrantResource) { + throw new AnalysisException("GRANT ON RESOURCE is comming soon"); + } + resourcePattern.analyze(); + } if (privileges == null || privileges.isEmpty()) { throw new AnalysisException("No privileges in grant statement."); } - 
checkPrivileges(analyzer, privileges, role, tblPattern); + if (tblPattern != null) { + checkPrivileges(analyzer, privileges, role, tblPattern); + } else { + checkPrivileges(analyzer, privileges, role, resourcePattern); + } } /* @@ -102,9 +136,10 @@ public void analyze(Analyzer analyzer) throws AnalysisException, UserException { * 4. Only user with GLOBAL level's GRANT_PRIV can grant/revoke privileges to/from roles. * 5.1 User should has GLOBAL level GRANT_PRIV * 5.2 or user has DATABASE/TABLE level GRANT_PRIV if grant/revoke to/from certain database or table. + * 5.3 or user should has 'resource' GRANT_PRIV if grant/revoke to/from certain 'resource' */ public static void checkPrivileges(Analyzer analyzer, List privileges, - String role, TablePattern tblPattern) throws AnalysisException { + String role, TablePattern tblPattern) throws AnalysisException { // Rule 1 if (privileges.contains(PaloPrivilege.NODE_PRIV)) { throw new AnalysisException("Can not grant NODE_PRIV to any other users or roles"); @@ -139,11 +174,46 @@ public static void checkPrivileges(Analyzer analyzer, List privil } } + public static void checkPrivileges(Analyzer analyzer, List privileges, + String role, ResourcePattern resourcePattern) throws AnalysisException { + // Rule 1 + if (privileges.contains(PaloPrivilege.NODE_PRIV)) { + throw new AnalysisException("Can not grant NODE_PRIV to any other users or roles"); + } + + // Rule 2 + if (resourcePattern.getPrivLevel() != PrivLevel.GLOBAL && privileges.contains(PaloPrivilege.ADMIN_PRIV)) { + throw new AnalysisException("ADMIN_PRIV privilege can only be granted on resource *"); + } + + if (role != null) { + // Rule 3 and 4 + if (!Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(), PrivPredicate.GRANT)) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "GRANT"); + } + } else { + // Rule 5.1 and 5.3 + if (resourcePattern.getPrivLevel() == PrivLevel.GLOBAL) { + if (!Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(), PrivPredicate.GRANT)) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "GRANT"); + } + } else { + if (!Catalog.getCurrentCatalog().getAuth().checkResourcePriv(ConnectContext.get(), resourcePattern.getResourceName(), PrivPredicate.GRANT)) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "GRANT"); + } + } + } + } + @Override public String toSql() { StringBuilder sb = new StringBuilder(); sb.append("GRANT ").append(Joiner.on(", ").join(privileges)); - sb.append(" ON ").append(tblPattern).append(" TO "); + if (tblPattern != null) { + sb.append(" ON ").append(tblPattern).append(" TO "); + } else { + sb.append(" ON RESOURCE '").append(resourcePattern).append("' TO "); + } if (!Strings.isNullOrEmpty(role)) { sb.append(" ROLE '").append(role).append("'"); } else { diff --git a/fe/src/main/java/org/apache/doris/analysis/ResourcePattern.java b/fe/src/main/java/org/apache/doris/analysis/ResourcePattern.java new file mode 100644 index 00000000000000..b0d2449521c835 --- /dev/null +++ b/fe/src/main/java/org/apache/doris/analysis/ResourcePattern.java @@ -0,0 +1,108 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.analysis; + +import org.apache.doris.common.AnalysisException; +import org.apache.doris.common.FeNameFormat; +import org.apache.doris.common.io.Text; +import org.apache.doris.common.io.Writable; +import org.apache.doris.mysql.privilege.PaloAuth.PrivLevel; + +import com.google.common.base.Strings; +import com.google.gson.annotations.SerializedName; +import org.apache.doris.persist.gson.GsonUtils; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +// only the following 2 formats are allowed +// * +// resource +public class ResourcePattern implements Writable { + @SerializedName(value = "resourceName") + private String resourceName; + + public static ResourcePattern ALL; + static { + ALL = new ResourcePattern("*"); + try { + ALL.analyze(); + } catch (AnalysisException e) { + // will not happen + } + } + + private ResourcePattern() { + } + + public ResourcePattern(String resourceName) { + this.resourceName = Strings.isNullOrEmpty(resourceName) ? "*" : resourceName; + } + + public String getResourceName() { + return resourceName; + } + + public PrivLevel getPrivLevel() { + if (resourceName.equals("*")) { + return PrivLevel.GLOBAL; + } else { + return PrivLevel.RESOURCE; + } + } + + public void analyze() throws AnalysisException { + if (!resourceName.equals("*")) { + FeNameFormat.checkResourceName(resourceName); + } + } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof ResourcePattern)) { + return false; + } + ResourcePattern other = (ResourcePattern) obj; + return resourceName.equals(other.getResourceName()); + } + + @Override + public int hashCode() { + int result = 17; + result = 31 * result + resourceName.hashCode(); + return result; + } + + @Override + public String toString() { + return resourceName; + } + + @Override + public void write(DataOutput out) throws IOException { + String json = GsonUtils.GSON.toJson(this); + Text.writeString(out, json); + } + + public static ResourcePattern read(DataInput in) throws IOException { + String json = Text.readString(in); + return GsonUtils.GSON.fromJson(json, ResourcePattern.class); + } +} + diff --git a/fe/src/main/java/org/apache/doris/analysis/RevokeStmt.java b/fe/src/main/java/org/apache/doris/analysis/RevokeStmt.java index 5c7edb341b2173..61dd664ec88ab8 100644 --- a/fe/src/main/java/org/apache/doris/analysis/RevokeStmt.java +++ b/fe/src/main/java/org/apache/doris/analysis/RevokeStmt.java @@ -33,16 +33,31 @@ // revoke privilege from some user, this is an administrator operation. 
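// Like GrantStmt, both the table pattern and the resource pattern forms are handled: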
// // REVOKE privilege [, privilege] ON db.tbl FROM user [ROLE 'role']; +// REVOKE privilege [, privilege] ON resource 'resource' FROM user [ROLE 'role']; public class RevokeStmt extends DdlStmt { private UserIdentity userIdent; private String role; private TablePattern tblPattern; + private ResourcePattern resourcePattern; private List privileges; public RevokeStmt(UserIdentity userIdent, String role, TablePattern tblPattern, List privileges) { this.userIdent = userIdent; this.role = role; this.tblPattern = tblPattern; + this.resourcePattern = null; + PrivBitSet privs = PrivBitSet.of(); + for (AccessPrivilege accessPrivilege : privileges) { + privs.or(accessPrivilege.toPaloPrivilege()); + } + this.privileges = privs.toPrivilegeList(); + } + + public RevokeStmt(UserIdentity userIdent, String role, ResourcePattern resourcePattern, List privileges) { + this.userIdent = userIdent; + this.role = role; + this.tblPattern = null; + this.resourcePattern = resourcePattern; PrivBitSet privs = PrivBitSet.of(); for (AccessPrivilege accessPrivilege : privileges) { privs.or(accessPrivilege.toPaloPrivilege()); @@ -58,6 +73,10 @@ public TablePattern getTblPattern() { return tblPattern; } + public ResourcePattern getResourcePattern() { + return resourcePattern; + } + public String getQualifiedRole() { return role; } @@ -75,21 +94,37 @@ public void analyze(Analyzer analyzer) throws AnalysisException { role = ClusterNamespace.getFullName(analyzer.getClusterName(), role); } - tblPattern.analyze(analyzer.getClusterName()); + if (tblPattern != null) { + tblPattern.analyze(analyzer.getClusterName()); + } else { + // TODO(wyb): spark-load + if (GrantStmt.disableGrantResource) { + throw new AnalysisException("REVOKE ON RESOURCE is comming soon"); + } + resourcePattern.analyze(); + } if (privileges == null || privileges.isEmpty()) { throw new AnalysisException("No privileges in revoke statement."); } // Revoke operation obey the same rule as Grant operation. reuse the same method - GrantStmt.checkPrivileges(analyzer, privileges, role, tblPattern); + if (tblPattern != null) { + GrantStmt.checkPrivileges(analyzer, privileges, role, tblPattern); + } else { + GrantStmt.checkPrivileges(analyzer, privileges, role, resourcePattern); + } } @Override public String toSql() { StringBuilder sb = new StringBuilder(); sb.append("REVOKE ").append(Joiner.on(", ").join(privileges)); - sb.append(" ON ").append(tblPattern).append(" FROM "); + if (tblPattern != null) { + sb.append(" ON ").append(tblPattern).append(" FROM "); + } else { + sb.append(" ON RESOURCE '").append(resourcePattern).append("' FROM "); + } if (!Strings.isNullOrEmpty(role)) { sb.append(" ROLE '").append(role).append("'"); } else { diff --git a/fe/src/main/java/org/apache/doris/analysis/ShowResourcesStmt.java b/fe/src/main/java/org/apache/doris/analysis/ShowResourcesStmt.java new file mode 100644 index 00000000000000..5736e87518f964 --- /dev/null +++ b/fe/src/main/java/org/apache/doris/analysis/ShowResourcesStmt.java @@ -0,0 +1,47 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.analysis; + +import org.apache.doris.catalog.Column; +import org.apache.doris.catalog.ResourceMgr; +import org.apache.doris.catalog.ScalarType; +import org.apache.doris.qe.ConnectContext; +import org.apache.doris.qe.ShowResultSetMetaData; + +public class ShowResourcesStmt extends ShowStmt { + public ShowResourcesStmt() { + } + + @Override + public ShowResultSetMetaData getMetaData() { + ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder(); + for (String title : ResourceMgr.RESOURCE_PROC_NODE_TITLE_NAMES) { + builder.addColumn(new Column(title, ScalarType.createVarchar(30))); + } + return builder.build(); + } + + @Override + public RedirectStatus getRedirectStatus() { + if (ConnectContext.get().getSessionVariable().getForwardToMaster()) { + return RedirectStatus.FORWARD_NO_SYNC; + } else { + return RedirectStatus.NO_FORWARD; + } + } +} diff --git a/fe/src/main/java/org/apache/doris/analysis/ShowRolesStmt.java b/fe/src/main/java/org/apache/doris/analysis/ShowRolesStmt.java index fd586592fb765d..ccf84e2c95989a 100644 --- a/fe/src/main/java/org/apache/doris/analysis/ShowRolesStmt.java +++ b/fe/src/main/java/org/apache/doris/analysis/ShowRolesStmt.java @@ -37,6 +37,7 @@ public class ShowRolesStmt extends ShowStmt { builder.addColumn(new Column("GlobalPrivs", ScalarType.createVarchar(300))); builder.addColumn(new Column("DatabasePrivs", ScalarType.createVarchar(300))); builder.addColumn(new Column("TablePrivs", ScalarType.createVarchar(300))); + builder.addColumn(new Column("ResourcePrivs", ScalarType.createVarchar(300))); META_DATA = builder.build(); } diff --git a/fe/src/main/java/org/apache/doris/catalog/AccessPrivilege.java b/fe/src/main/java/org/apache/doris/catalog/AccessPrivilege.java index 22295c2e405b77..2d9bfa7b189a8b 100644 --- a/fe/src/main/java/org/apache/doris/catalog/AccessPrivilege.java +++ b/fe/src/main/java/org/apache/doris/catalog/AccessPrivilege.java @@ -37,7 +37,8 @@ public enum AccessPrivilege { ALTER_PRIV(8, "Privilege for alter database or table"), CREATE_PRIV(9, "Privilege for createing database or table"), DROP_PRIV(10, "Privilege for dropping database or table"), - ADMIN_PRIV(11, "All privileges except NODE_PRIV"); + ADMIN_PRIV(11, "All privileges except NODE_PRIV"), + USAGE_PRIV(12, "Privileage for use resource"); private int flag; private String desc; @@ -48,7 +49,7 @@ private AccessPrivilege(int flag, String desc) { } public PrivBitSet toPaloPrivilege() { - Preconditions.checkState(flag > 0 && flag < 12); + Preconditions.checkState(flag > 0 && flag < 13); switch (flag) { case 1: return PrivBitSet.of(PaloPrivilege.SELECT_PRIV); @@ -73,6 +74,8 @@ public PrivBitSet toPaloPrivilege() { return PrivBitSet.of(PaloPrivilege.DROP_PRIV); case 11: return PrivBitSet.of(PaloPrivilege.ADMIN_PRIV); + case 12: + return PrivBitSet.of(PaloPrivilege.USAGE_PRIV); default: return null; } diff --git a/fe/src/main/java/org/apache/doris/catalog/Catalog.java b/fe/src/main/java/org/apache/doris/catalog/Catalog.java index 19578ea6b00c7b..e610cca632a466 100755 --- a/fe/src/main/java/org/apache/doris/catalog/Catalog.java +++ 
b/fe/src/main/java/org/apache/doris/catalog/Catalog.java @@ -359,6 +359,7 @@ public class Catalog { private PullLoadJobMgr pullLoadJobMgr; private BrokerMgr brokerMgr; + private ResourceMgr resourceMgr; private GlobalTransactionMgr globalTransactionMgr; @@ -496,6 +497,7 @@ private Catalog() { this.pullLoadJobMgr = new PullLoadJobMgr(); this.brokerMgr = new BrokerMgr(); + this.resourceMgr = new ResourceMgr(); this.globalTransactionMgr = new GlobalTransactionMgr(this); this.tabletStatMgr = new TabletStatMgr(); @@ -567,6 +569,10 @@ public BrokerMgr getBrokerMgr() { return brokerMgr; } + public ResourceMgr getResourceMgr() { + return resourceMgr; + } + public static GlobalTransactionMgr getCurrentGlobalTransactionMgr() { return getCurrentCatalog().globalTransactionMgr; } @@ -1437,6 +1443,8 @@ public void loadImage(String imageDir) throws IOException, DdlException { checksum = loadColocateTableIndex(dis, checksum); checksum = loadRoutineLoadJobs(dis, checksum); checksum = loadLoadJobsV2(dis, checksum); + // TODO(wyb): spark-load + //checksum = loadResources(dis, checksum); checksum = loadSmallFiles(dis, checksum); checksum = loadPlugins(dis, checksum); checksum = loadDeleteHandler(dis, checksum); @@ -1838,6 +1846,17 @@ public long loadLoadJobsV2(DataInputStream in, long checksum) throws IOException return checksum; } + public long loadResources(DataInputStream in, long checksum) throws IOException { + // TODO(wyb): spark-load + /* + if (MetaContext.get().getMetaVersion() >= FeMetaVersion.new_version_by_wyb) { + resourceMgr = ResourceMgr.read(in); + } + LOG.info("finished replay resources from image"); + */ + return checksum; + } + public long loadSmallFiles(DataInputStream in, long checksum) throws IOException { if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_52) { smallFileMgr.readFields(in); @@ -1890,6 +1909,8 @@ public void saveImage(File curFile, long replayedJournalId) throws IOException { checksum = saveColocateTableIndex(dos, checksum); checksum = saveRoutineLoadJobs(dos, checksum); checksum = saveLoadJobsV2(dos, checksum); + // TODO(wyb): spark-load + //checksum = saveResources(dos, checksum); checksum = saveSmallFiles(dos, checksum); checksum = savePlugins(dos, checksum); checksum = saveDeleteHandler(dos, checksum); @@ -2161,6 +2182,11 @@ public long saveLoadJobsV2(DataOutputStream out, long checksum) throws IOExcepti return checksum; } + public long saveResources(DataOutputStream out, long checksum) throws IOException { + Catalog.getCurrentCatalog().getResourceMgr().write(out); + return checksum; + } + private long saveSmallFiles(DataOutputStream out, long checksum) throws IOException { smallFileMgr.write(out); return checksum; diff --git a/fe/src/main/java/org/apache/doris/catalog/Resource.java b/fe/src/main/java/org/apache/doris/catalog/Resource.java new file mode 100644 index 00000000000000..e140b9ff11d3bf --- /dev/null +++ b/fe/src/main/java/org/apache/doris/catalog/Resource.java @@ -0,0 +1,110 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.catalog; + +import org.apache.doris.analysis.CreateResourceStmt; +import org.apache.doris.common.DdlException; +import org.apache.doris.common.io.Text; +import org.apache.doris.common.io.Writable; +import org.apache.doris.common.proc.BaseProcResult; +import org.apache.doris.persist.gson.GsonUtils; + +import com.google.gson.annotations.SerializedName; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.Map; + +public abstract class Resource implements Writable { + public enum ResourceType { + UNKNOWN, + SPARK; + + public static ResourceType fromString(String resourceType) { + for (ResourceType type : ResourceType.values()) { + if (type.name().equalsIgnoreCase(resourceType)) { + return type; + } + } + return UNKNOWN; + } + } + + @SerializedName(value = "name") + protected String name; + @SerializedName(value = "type") + protected ResourceType type; + + public Resource(String name, ResourceType type) { + this.name = name; + this.type = type; + } + + public static Resource fromStmt(CreateResourceStmt stmt) throws DdlException { + Resource resource = null; + ResourceType type = stmt.getResourceType(); + switch (type) { + case SPARK: + resource = new SparkResource(stmt.getResourceName()); + break; + default: + throw new DdlException("Only support Spark resource."); + } + + resource.setProperties(stmt.getProperties()); + return resource; + } + + public String getName() { + return name; + } + + public ResourceType getType() { + return type; + } + + /** + * Set and check the properties in child resources + */ + protected abstract void setProperties(Map properties) throws DdlException; + + /** + * Fill BaseProcResult with different properties in child resources + * ResourceMgr.RESOURCE_PROC_NODE_TITLE_NAMES format: + * | Name | ResourceType | Key | Value | + */ + protected abstract void getProcNodeData(BaseProcResult result); + + @Override + public String toString() { + return GsonUtils.GSON.toJson(this); + } + + @Override + public void write(DataOutput out) throws IOException { + String json = GsonUtils.GSON.toJson(this); + Text.writeString(out, json); + } + + public static Resource read(DataInput in) throws IOException { + String json = Text.readString(in); + return GsonUtils.GSON.fromJson(json, Resource.class); + } +} + diff --git a/fe/src/main/java/org/apache/doris/catalog/ResourceMgr.java b/fe/src/main/java/org/apache/doris/catalog/ResourceMgr.java new file mode 100644 index 00000000000000..d974b16afa4ced --- /dev/null +++ b/fe/src/main/java/org/apache/doris/catalog/ResourceMgr.java @@ -0,0 +1,148 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.catalog; + +import org.apache.doris.analysis.CreateResourceStmt; +import org.apache.doris.analysis.DropResourceStmt; +import org.apache.doris.catalog.Resource.ResourceType; +import org.apache.doris.common.DdlException; +import org.apache.doris.common.io.Text; +import org.apache.doris.common.io.Writable; +import org.apache.doris.common.proc.BaseProcResult; +import org.apache.doris.common.proc.ProcNodeInterface; +import org.apache.doris.common.proc.ProcResult; +import org.apache.doris.mysql.privilege.PrivPredicate; +import org.apache.doris.persist.gson.GsonUtils; +import org.apache.doris.qe.ConnectContext; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Maps; +import com.google.gson.annotations.SerializedName; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.List; +import java.util.Map; + +/** + * Resource manager is responsible for managing external resources used by Doris. + * For example, Spark/MapReduce used for ETL, Spark/GPU used for queries, HDFS/S3 used for external storage. + * Now only support Spark. + */ +public class ResourceMgr implements Writable { + private static final Logger LOG = LogManager.getLogger(ResourceMgr.class); + + public static final ImmutableList RESOURCE_PROC_NODE_TITLE_NAMES = new ImmutableList.Builder() + .add("Name").add("ResourceType").add("Key").add("Value") + .build(); + + // { resourceName -> Resource} + @SerializedName(value = "nameToResource") + private final Map nameToResource = Maps.newConcurrentMap(); + private final ResourceProcNode procNode = new ResourceProcNode(); + + public ResourceMgr() { + } + + public void createResource(CreateResourceStmt stmt) throws DdlException { + if (stmt.getResourceType() != ResourceType.SPARK) { + throw new DdlException("Only support Spark resource."); + } + + String resourceName = stmt.getResourceName(); + Resource resource = Resource.fromStmt(stmt); + if (nameToResource.putIfAbsent(resourceName, resource) != null) { + throw new DdlException("Resource(" + resourceName + ") already exist"); + } + // log add + Catalog.getInstance().getEditLog().logCreateResource(resource); + LOG.info("create resource success. resource: {}", resource); + } + + public void replayCreateResource(Resource resource) { + nameToResource.put(resource.getName(), resource); + } + + public void dropResource(DropResourceStmt stmt) throws DdlException { + String name = stmt.getResourceName(); + if (nameToResource.remove(name) == null) { + throw new DdlException("Resource(" + name + ") does not exist"); + } + + // log drop + Catalog.getInstance().getEditLog().logDropResource(name); + LOG.info("drop resource success. 
resource name: {}", name); + } + + public void replayDropResource(String name) { + nameToResource.remove(name); + } + + public boolean containsResource(String name) { + return nameToResource.containsKey(name); + } + + public Resource getResource(String name) { + return nameToResource.get(name); + } + + public int getResourceNum() { + return nameToResource.size(); + } + + public List> getResourcesInfo() { + return procNode.fetchResult().getRows(); + } + + public ResourceProcNode getProcNode() { + return procNode; + } + + @Override + public void write(DataOutput out) throws IOException { + String json = GsonUtils.GSON.toJson(this); + Text.writeString(out, json); + } + + public static ResourceMgr read(DataInput in) throws IOException { + String json = Text.readString(in); + return GsonUtils.GSON.fromJson(json, ResourceMgr.class); + } + + public class ResourceProcNode implements ProcNodeInterface { + @Override + public ProcResult fetchResult() { + BaseProcResult result = new BaseProcResult(); + result.setNames(RESOURCE_PROC_NODE_TITLE_NAMES); + + for (Map.Entry entry : nameToResource.entrySet()) { + Resource resource = entry.getValue(); + // check resource privs + if (!Catalog.getCurrentCatalog().getAuth().checkResourcePriv(ConnectContext.get(), resource.getName(), + PrivPredicate.SHOW)) { + continue; + } + resource.getProcNodeData(result); + } + return result; + } + } +} diff --git a/fe/src/main/java/org/apache/doris/catalog/SparkResource.java b/fe/src/main/java/org/apache/doris/catalog/SparkResource.java new file mode 100644 index 00000000000000..3d57914b2e25c3 --- /dev/null +++ b/fe/src/main/java/org/apache/doris/catalog/SparkResource.java @@ -0,0 +1,252 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.catalog; + +//import org.apache.doris.analysis.ResourceDesc; +import org.apache.doris.common.DdlException; +import org.apache.doris.common.Pair; +import org.apache.doris.common.proc.BaseProcResult; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.gson.annotations.SerializedName; + +import java.util.Map; + +/** + * Spark resource for etl or query. + * working_dir and broker[.xxx] are optional and used in spark ETL. + * working_dir is used to store ETL intermediate files and broker is used to read the intermediate files by BE. 
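 * Note: when used for ETL, working_dir and broker must be set together;
 * setProperties() below rejects setting only one of them.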
+ * + * Spark resource example: + * CREATE EXTERNAL RESOURCE "spark0" + * PROPERTIES + * ( + * "type" = "spark", + * "spark.master" = "yarn", + * "spark.submit.deployMode" = "cluster", + * "spark.jars" = "xxx.jar,yyy.jar", + * "spark.files" = "/tmp/aaa,/tmp/bbb", + * "spark.executor.memory" = "1g", + * "spark.yarn.queue" = "queue0", + * "spark.hadoop.yarn.resourcemanager.address" = "127.0.0.1:9999", + * "spark.hadoop.fs.defaultFS" = "hdfs://127.0.0.1:10000", + * "working_dir" = "hdfs://127.0.0.1:10000/tmp/doris", + * "broker" = "broker0", + * "broker.username" = "user0", + * "broker.password" = "password0" + * ); + * + * DROP RESOURCE "spark0"; + */ +public class SparkResource extends Resource { + private static final String SPARK_MASTER = "spark.master"; + private static final String SPARK_SUBMIT_DEPLOY_MODE = "spark.submit.deployMode"; + private static final String WORKING_DIR = "working_dir"; + private static final String BROKER = "broker"; + private static final String YARN_MASTER = "yarn"; + private static final String SPARK_CONFIG_PREFIX = "spark."; + private static final String BROKER_PROPERTY_PREFIX = "broker."; + // spark uses hadoop configs in the form of spark.hadoop.* + private static final String SPARK_YARN_RESOURCE_MANAGER_ADDRESS = "spark.hadoop.yarn.resourcemanager.address"; + private static final String SPARK_FS_DEFAULT_FS = "spark.hadoop.fs.defaultFS"; + private static final String YARN_RESOURCE_MANAGER_ADDRESS = "yarn.resourcemanager.address"; + + public enum DeployMode { + CLUSTER, + CLIENT; + + public static DeployMode fromString(String deployMode) { + for (DeployMode mode : DeployMode.values()) { + if (mode.name().equalsIgnoreCase(deployMode)) { + return mode; + } + } + return null; + } + } + + @SerializedName(value = "sparkConfigs") + private Map sparkConfigs; + @SerializedName(value = "workingDir") + private String workingDir; + @SerializedName(value = "broker") + private String broker; + // broker username and password + @SerializedName(value = "brokerProperties") + private Map brokerProperties; + + public SparkResource(String name) { + this(name, Maps.newHashMap(), null, null, Maps.newHashMap()); + } + + private SparkResource(String name, Map sparkConfigs, String workingDir, String broker, + Map brokerProperties) { + super(name, ResourceType.SPARK); + this.sparkConfigs = sparkConfigs; + this.workingDir = workingDir; + this.broker = broker; + this.brokerProperties = brokerProperties; + } + + public String getMaster() { + return sparkConfigs.get(SPARK_MASTER); + } + + public DeployMode getDeployMode() { + return DeployMode.fromString(sparkConfigs.get(SPARK_SUBMIT_DEPLOY_MODE)); + } + + public String getWorkingDir() { + return workingDir; + } + + public String getBroker() { + return broker; + } + + public Map getBrokerPropertiesWithoutPrefix() { + Map properties = Maps.newHashMap(); + for (Map.Entry entry : brokerProperties.entrySet()) { + String key = entry.getKey(); + if (key.startsWith(BROKER_PROPERTY_PREFIX)) { + properties.put(key.substring(key.indexOf(".") + 1), entry.getValue()); + } + } + return properties; + } + + public Map getSparkConfigs() { + return sparkConfigs; + } + + public Pair getYarnResourcemanagerAddressPair() { + return Pair.create(YARN_RESOURCE_MANAGER_ADDRESS, sparkConfigs.get(SPARK_YARN_RESOURCE_MANAGER_ADDRESS)); + } + + public SparkResource getCopiedResource() { + return new SparkResource(name, Maps.newHashMap(sparkConfigs), workingDir, broker, brokerProperties); + } + + public boolean isYarnMaster() { + return 
getMaster().equalsIgnoreCase(YARN_MASTER); + } + + /* + public void update(ResourceDesc resourceDesc) throws DdlException { + Preconditions.checkState(name.equals(resourceDesc.getName())); + + Map properties = resourceDesc.getProperties(); + if (properties == null) { + return; + } + + // update spark configs + if (properties.containsKey(SPARK_MASTER)) { + throw new DdlException("Cannot change spark master"); + } + sparkConfigs.putAll(getSparkConfigs(properties)); + + // update working dir and broker + if (properties.containsKey(WORKING_DIR)) { + workingDir = properties.get(WORKING_DIR); + } + if (properties.containsKey(BROKER)) { + broker = properties.get(BROKER); + } + brokerProperties.putAll(getBrokerProperties(properties)); + } + */ + + @Override + protected void setProperties(Map properties) throws DdlException { + Preconditions.checkState(properties != null); + + // get spark configs + sparkConfigs = getSparkConfigs(properties); + // check master and deploy mode + if (getMaster() == null) { + throw new DdlException("Missing " + SPARK_MASTER + " in properties"); + } + String deployModeStr = sparkConfigs.get(SPARK_SUBMIT_DEPLOY_MODE); + if (deployModeStr != null) { + DeployMode deployMode = DeployMode.fromString(deployModeStr); + if (deployMode == null) { + throw new DdlException("Unknown deploy mode: " + deployModeStr); + } + } else { + throw new DdlException("Missing " + SPARK_SUBMIT_DEPLOY_MODE + " in properties"); + } + // if deploy machines do not set HADOOP_CONF_DIR env, we should set these configs blow + if ((!sparkConfigs.containsKey(SPARK_YARN_RESOURCE_MANAGER_ADDRESS) || !sparkConfigs.containsKey(SPARK_FS_DEFAULT_FS)) + && isYarnMaster()) { + throw new DdlException("Missing (" + SPARK_YARN_RESOURCE_MANAGER_ADDRESS + " and " + SPARK_FS_DEFAULT_FS + + ") in yarn master"); + } + + // check working dir and broker + workingDir = properties.get(WORKING_DIR); + broker = properties.get(BROKER); + if ((workingDir == null && broker != null) || (workingDir != null && broker == null)) { + throw new DdlException("working_dir and broker should be assigned at the same time"); + } + // check broker exist + if (broker != null && !Catalog.getInstance().getBrokerMgr().contaisnBroker(broker)) { + throw new DdlException("Unknown broker name(" + broker + ")"); + } + brokerProperties = getBrokerProperties(properties); + } + + private Map getSparkConfigs(Map properties) { + Map sparkConfigs = Maps.newHashMap(); + for (Map.Entry entry : properties.entrySet()) { + if (entry.getKey().startsWith(SPARK_CONFIG_PREFIX)) { + sparkConfigs.put(entry.getKey(), entry.getValue()); + } + } + return sparkConfigs; + } + + private Map getBrokerProperties(Map properties) { + Map brokerProperties = Maps.newHashMap(); + for (Map.Entry entry : properties.entrySet()) { + if (entry.getKey().startsWith(BROKER_PROPERTY_PREFIX)) { + brokerProperties.put(entry.getKey(), entry.getValue()); + } + } + return brokerProperties; + } + + @Override + protected void getProcNodeData(BaseProcResult result) { + String lowerCaseType = type.name().toLowerCase(); + for (Map.Entry entry : sparkConfigs.entrySet()) { + result.addRow(Lists.newArrayList(name, lowerCaseType, entry.getKey(), entry.getValue())); + } + if (workingDir != null) { + result.addRow(Lists.newArrayList(name, lowerCaseType, SparkResource.WORKING_DIR, workingDir)); + } + if (broker != null) { + result.addRow(Lists.newArrayList(name, lowerCaseType, SparkResource.BROKER, broker)); + } + for (Map.Entry entry : brokerProperties.entrySet()) { + 
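+            // unlike getBrokerPropertiesWithoutPrefix(), proc rows keep the full "broker.xxx" key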
result.addRow(Lists.newArrayList(name, lowerCaseType, entry.getKey(), entry.getValue())); + } + } +} diff --git a/fe/src/main/java/org/apache/doris/common/CaseSensibility.java b/fe/src/main/java/org/apache/doris/common/CaseSensibility.java index fa11d1d5390ced..ce5130646b7ac6 100644 --- a/fe/src/main/java/org/apache/doris/common/CaseSensibility.java +++ b/fe/src/main/java/org/apache/doris/common/CaseSensibility.java @@ -28,7 +28,8 @@ public enum CaseSensibility { ROLE(false), HOST(false), LABEL(false), - VARIABLES(true); + VARIABLES(true), + RESOURCE(true); private boolean caseSensitive; diff --git a/fe/src/main/java/org/apache/doris/common/FeNameFormat.java b/fe/src/main/java/org/apache/doris/common/FeNameFormat.java index 0a4ecd19285fbb..de386fffdf6571 100644 --- a/fe/src/main/java/org/apache/doris/common/FeNameFormat.java +++ b/fe/src/main/java/org/apache/doris/common/FeNameFormat.java @@ -99,6 +99,10 @@ public static void checkRoleName(String role, boolean canBeAdmin, String errMsg) } } + public static void checkResourceName(String resourceName) throws AnalysisException { + checkCommonName("resource", resourceName); + } + public static void checkCommonName(String type, String name) throws AnalysisException { if (Strings.isNullOrEmpty(name) || !name.matches(COMMON_NAME_REGEX)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_WRONG_NAME_FORMAT, type, name); diff --git a/fe/src/main/java/org/apache/doris/common/proc/AuthProcDir.java b/fe/src/main/java/org/apache/doris/common/proc/AuthProcDir.java index 4ab05a701709b3..0bf5008c507685 100644 --- a/fe/src/main/java/org/apache/doris/common/proc/AuthProcDir.java +++ b/fe/src/main/java/org/apache/doris/common/proc/AuthProcDir.java @@ -32,7 +32,7 @@ public class AuthProcDir implements ProcDirInterface { public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() .add("UserIdentity").add("Password").add("GlobalPrivs").add("DatabasePrivs") - .add("TablePrivs").build(); + .add("TablePrivs").add("ResourcePrivs").build(); private PaloAuth auth; diff --git a/fe/src/main/java/org/apache/doris/common/proc/ProcService.java b/fe/src/main/java/org/apache/doris/common/proc/ProcService.java index 5779393b202aea..ecdad70a56fdaa 100644 --- a/fe/src/main/java/org/apache/doris/common/proc/ProcService.java +++ b/fe/src/main/java/org/apache/doris/common/proc/ProcService.java @@ -42,6 +42,7 @@ private ProcService() { root.register("tasks", new TasksProcDir()); root.register("frontends", new FrontendsProcNode(Catalog.getInstance())); root.register("brokers", Catalog.getInstance().getBrokerMgr().getProcNode()); + root.register("resources", Catalog.getInstance().getResourceMgr().getProcNode()); root.register("load_error_hub", new LoadErrorHubProcNode(Catalog.getInstance())); root.register("transactions", new TransDbProcDir()); root.register("monitor", new MonitorProcDir()); diff --git a/fe/src/main/java/org/apache/doris/journal/JournalEntity.java b/fe/src/main/java/org/apache/doris/journal/JournalEntity.java index ada983b697c889..69971c10daebd1 100644 --- a/fe/src/main/java/org/apache/doris/journal/JournalEntity.java +++ b/fe/src/main/java/org/apache/doris/journal/JournalEntity.java @@ -26,6 +26,7 @@ import org.apache.doris.backup.RestoreJob; import org.apache.doris.catalog.BrokerMgr; import org.apache.doris.catalog.Database; +import org.apache.doris.catalog.Resource; import org.apache.doris.catalog.Function; import org.apache.doris.catalog.FunctionSearchDesc; import org.apache.doris.cluster.BaseParam; @@ -498,6 +499,17 @@ public void 
readFields(DataInput in) throws IOException { isRead = true; break; } + case OperationType.OP_CREATE_RESOURCE: { + data = Resource.read(in); + isRead = true; + break; + } + case OperationType.OP_DROP_RESOURCE: { + data = new Text(); + ((Text) data).readFields(in); + isRead = true; + break; + } case OperationType.OP_CREATE_SMALL_FILE: case OperationType.OP_DROP_SMALL_FILE: { data = SmallFile.read(in); diff --git a/fe/src/main/java/org/apache/doris/mysql/privilege/DbPrivEntry.java b/fe/src/main/java/org/apache/doris/mysql/privilege/DbPrivEntry.java index 746f4d53f83d65..f36d6793f1d96b 100644 --- a/fe/src/main/java/org/apache/doris/mysql/privilege/DbPrivEntry.java +++ b/fe/src/main/java/org/apache/doris/mysql/privilege/DbPrivEntry.java @@ -56,8 +56,8 @@ public static DbPrivEntry create(String host, String db, String user, boolean is PatternMatcher userPattern = PatternMatcher.createMysqlPattern(user, CaseSensibility.USER.getCaseSensibility()); - if (privs.containsNodePriv()) { - throw new AnalysisException("Db privilege can not contains global privileges: " + privs); + if (privs.containsNodePriv() || privs.containsResourcePriv()) { + throw new AnalysisException("Db privilege can not contains global or resource privileges: " + privs); } return new DbPrivEntry(hostPattern, host, dbPattern, db, userPattern, user, isDomain, privs); diff --git a/fe/src/main/java/org/apache/doris/mysql/privilege/PaloAuth.java b/fe/src/main/java/org/apache/doris/mysql/privilege/PaloAuth.java index 44dc879ab90323..2565b66875acd6 100644 --- a/fe/src/main/java/org/apache/doris/mysql/privilege/PaloAuth.java +++ b/fe/src/main/java/org/apache/doris/mysql/privilege/PaloAuth.java @@ -22,6 +22,7 @@ import org.apache.doris.analysis.DropRoleStmt; import org.apache.doris.analysis.DropUserStmt; import org.apache.doris.analysis.GrantStmt; +import org.apache.doris.analysis.ResourcePattern; import org.apache.doris.analysis.RevokeStmt; import org.apache.doris.analysis.SetPassVar; import org.apache.doris.analysis.SetUserPropertyStmt; @@ -68,6 +69,7 @@ public class PaloAuth implements Writable { private UserPrivTable userPrivTable = new UserPrivTable(); private DbPrivTable dbPrivTable = new DbPrivTable(); private TablePrivTable tablePrivTable = new TablePrivTable(); + private ResourcePrivTable resourcePrivTable = new ResourcePrivTable(); private RoleManager roleManager = new RoleManager();; private UserPropertyMgr propertyMgr = new UserPropertyMgr(); @@ -91,7 +93,7 @@ private void writeUnlock() { } public enum PrivLevel { - GLOBAL, DATABASE, TABLE + GLOBAL, DATABASE, TABLE, RESOURCE } public PaloAuth() { @@ -196,6 +198,33 @@ private void revokeTblPrivs(UserIdentity userIdentity, String db, String tbl, Pr tablePrivTable.revoke(entry, errOnNonExist, true /* delete entry when empty */); } + private void grantResourcePrivs(UserIdentity userIdentity, String resourceName, boolean errOnExist, + boolean errOnNonExist, PrivBitSet privs) throws DdlException { + ResourcePrivEntry entry; + try { + entry = ResourcePrivEntry.create(userIdentity.getHost(), resourceName, userIdentity.getQualifiedUser(), + userIdentity.isDomain(), privs); + entry.setSetByDomainResolver(false); + } catch (AnalysisException e) { + throw new DdlException(e.getMessage()); + } + resourcePrivTable.addEntry(entry, errOnExist, errOnNonExist); + } + + private void revokeResourcePrivs(UserIdentity userIdentity, String resourceName, PrivBitSet privs, + boolean errOnNonExist) throws DdlException { + ResourcePrivEntry entry; + try { + entry = 
ResourcePrivEntry.create(userIdentity.getHost(), resourceName, userIdentity.getQualifiedUser(), + userIdentity.isDomain(), privs); + entry.setSetByDomainResolver(false); + } catch (AnalysisException e) { + throw new DdlException(e.getMessage()); + } + + resourcePrivTable.revoke(entry, errOnNonExist, true /* delete entry when empty */); + } + /* * check password, if matched, save the userIdentity in matched entry. * the following auth checking should use userIdentity saved in currentUser. @@ -324,6 +353,25 @@ public boolean checkTblPriv(UserIdentity currentUser, String db, String tbl, Pri return false; } + public boolean checkResourcePriv(ConnectContext ctx, String resourceName, PrivPredicate wanted) { + return checkResourcePriv(ctx.getCurrentUserIdentity(), resourceName, wanted); + } + + public boolean checkResourcePriv(UserIdentity currentUser, String resourceName, PrivPredicate wanted) { + if (!Config.enable_auth_check) { + return true; + } + + PrivBitSet savedPrivs = PrivBitSet.of(); + if (checkGlobalInternal(currentUser, wanted, savedPrivs) + || checkResourceInternal(currentUser, resourceName, wanted, savedPrivs)) { + return true; + } + + LOG.debug("failed to get wanted privs: {}, granted: {}", wanted, savedPrivs); + return false; + } + public boolean checkPrivByAuthInfo(ConnectContext ctx, AuthorizationInfo authInfo, PrivPredicate wanted) { if (authInfo == null) { return false; @@ -417,11 +465,26 @@ private boolean checkTblInternal(UserIdentity currentUser, String db, String tbl } } + private boolean checkResourceInternal(UserIdentity currentUser, String resourceName, + PrivPredicate wanted, PrivBitSet savedPrivs) { + readLock(); + try { + resourcePrivTable.getPrivs(currentUser, resourceName, savedPrivs); + if (PaloPrivilege.satisfy(savedPrivs, wanted)) { + return true; + } + return false; + } finally { + readUnlock(); + } + } + // for test only public void clear() { userPrivTable.clear(); dbPrivTable.clear(); tablePrivTable.clear(); + resourcePrivTable.clear(); } // create user @@ -473,6 +536,11 @@ private void createUserInternal(UserIdentity userIdent, String roleName, byte[] grantInternal(userIdent, null /* role */, entry.getKey(), entry.getValue().copy(), false /* err on non exist */, true /* is replay */); } + for (Map.Entry entry : role.getResourcePatternToPrivs().entrySet()) { + // use PrivBitSet copy to avoid same object being changed synchronously + grantInternal(userIdent, null /* role */, entry.getKey(), entry.getValue().copy(), + false /* err on non exist */, true /* is replay */); + } } if (role != null) { @@ -496,7 +564,7 @@ private void createUserInternal(UserIdentity userIdent, String roleName, byte[] } if (!isReplay) { - PrivInfo privInfo = new PrivInfo(userIdent, null, null, password, roleName); + PrivInfo privInfo = new PrivInfo(userIdent, null, password, roleName); Catalog.getCurrentCatalog().getEditLog().logCreateUser(privInfo); } LOG.info("finished to create user: {}, is replay: {}", userIdent, isReplay); @@ -527,6 +595,7 @@ private void dropUserInternal(UserIdentity userIdent, boolean isReplay) { userPrivTable.dropUser(userIdent); dbPrivTable.dropUser(userIdent); tablePrivTable.dropUser(userIdent); + resourcePrivTable.dropUser(userIdent); // drop user in roles if exist roleManager.dropUser(userIdent); @@ -552,15 +621,26 @@ private void dropUserInternal(UserIdentity userIdent, boolean isReplay) { // grant public void grant(GrantStmt stmt) throws DdlException { PrivBitSet privs = PrivBitSet.of(stmt.getPrivileges()); - grantInternal(stmt.getUserIdent(), 
stmt.getQualifiedRole(), stmt.getTblPattern(), privs, - true /* err on non exist */, false /* not replay */); + if (stmt.getTblPattern() != null) { + grantInternal(stmt.getUserIdent(), stmt.getQualifiedRole(), stmt.getTblPattern(), privs, + true /* err on non exist */, false /* not replay */); + } else { + grantInternal(stmt.getUserIdent(), stmt.getQualifiedRole(), stmt.getResourcePattern(), privs, + true /* err on non exist */, false /* not replay */); + } } public void replayGrant(PrivInfo privInfo) { try { - grantInternal(privInfo.getUserIdent(), privInfo.getRole(), - privInfo.getTblPattern(), privInfo.getPrivs(), - true /* err on non exist */, true /* is replay */); + if (privInfo.getTblPattern() != null) { + grantInternal(privInfo.getUserIdent(), privInfo.getRole(), + privInfo.getTblPattern(), privInfo.getPrivs(), + true /* err on non exist */, true /* is replay */); + } else { + grantInternal(privInfo.getUserIdent(), privInfo.getRole(), + privInfo.getResourcePattern(), privInfo.getPrivs(), + true /* err on non exist */, true /* is replay */); + } } catch (DdlException e) { LOG.error("should not happen", e); } @@ -597,6 +677,36 @@ private void grantInternal(UserIdentity userIdent, String role, TablePattern tbl } } + private void grantInternal(UserIdentity userIdent, String role, ResourcePattern resourcePattern, PrivBitSet privs, + boolean errOnNonExist, boolean isReplay) throws DdlException { + writeLock(); + try { + if (role != null) { + // grant privs to role, role must exist + PaloRole newRole = new PaloRole(role, resourcePattern, privs); + PaloRole existingRole = roleManager.addRole(newRole, false /* err on exist */); + + // update users' privs of this role + for (UserIdentity user : existingRole.getUsers()) { + for (Map.Entry entry : existingRole.getResourcePatternToPrivs().entrySet()) { + // copy the PrivBitSet + grantPrivs(user, entry.getKey(), entry.getValue().copy(), errOnNonExist); + } + } + } else { + grantPrivs(userIdent, resourcePattern, privs, errOnNonExist); + } + + if (!isReplay) { + PrivInfo info = new PrivInfo(userIdent, resourcePattern, privs, null, role); + Catalog.getCurrentCatalog().getEditLog().logGrantPriv(info); + } + LOG.info("finished to grant resource privilege. 
is replay: {}", isReplay); + } finally { + writeUnlock(); + } + } + public void grantPrivs(UserIdentity userIdent, TablePattern tblPattern, PrivBitSet privs, boolean errOnNonExist) throws DdlException { LOG.debug("grant {} on {} to {}, err on non exist: {}", privs, tblPattern, userIdent, errOnNonExist); @@ -637,6 +747,33 @@ public void grantPrivs(UserIdentity userIdent, TablePattern tblPattern, PrivBitS } } + public void grantPrivs(UserIdentity userIdent, ResourcePattern resourcePattern, PrivBitSet privs, + boolean errOnNonExist) throws DdlException { + LOG.debug("grant {} on resource {} to {}, err on non exist: {}", privs, resourcePattern, userIdent, errOnNonExist); + + writeLock(); + try { + // check if user identity already exist + if (errOnNonExist && !doesUserExist(userIdent)) { + throw new DdlException("user " + userIdent + " does not exist"); + } + + // grant privs to user + switch (resourcePattern.getPrivLevel()) { + case GLOBAL: + grantGlobalPrivs(userIdent, false, errOnNonExist, privs); + break; + case RESOURCE: + grantResourcePrivs(userIdent, resourcePattern.getResourceName(), false, false, privs); + break; + default: + Preconditions.checkNotNull(null, resourcePattern.getPrivLevel()); + } + } finally { + writeUnlock(); + } + } + // return true if user ident exist private boolean doesUserExist(UserIdentity userIdent) { if (userIdent.isDomain()) { @@ -649,14 +786,24 @@ private boolean doesUserExist(UserIdentity userIdent) { // revoke public void revoke(RevokeStmt stmt) throws DdlException { PrivBitSet privs = PrivBitSet.of(stmt.getPrivileges()); - revokeInternal(stmt.getUserIdent(), stmt.getQualifiedRole(), stmt.getTblPattern(), privs, - true /* err on non exist */, false /* is replay */); + if (stmt.getTblPattern() != null) { + revokeInternal(stmt.getUserIdent(), stmt.getQualifiedRole(), stmt.getTblPattern(), privs, + true /* err on non exist */, false /* is replay */); + } else { + revokeInternal(stmt.getUserIdent(), stmt.getQualifiedRole(), stmt.getResourcePattern(), privs, + true /* err on non exist */, false /* is replay */); + } } public void replayRevoke(PrivInfo info) { try { - revokeInternal(info.getUserIdent(), info.getRole(), info.getTblPattern(), info.getPrivs(), - true /* err on non exist */, true /* is replay */); + if (info.getTblPattern() != null) { + revokeInternal(info.getUserIdent(), info.getRole(), info.getTblPattern(), info.getPrivs(), + true /* err on non exist */, true /* is replay */); + } else { + revokeInternal(info.getUserIdent(), info.getRole(), info.getResourcePattern(), info.getPrivs(), + true /* err on non exist */, true /* is replay */); + } } catch (DdlException e) { LOG.error("should not happend", e); } @@ -689,6 +836,33 @@ private void revokeInternal(UserIdentity userIdent, String role, TablePattern tb } } + private void revokeInternal(UserIdentity userIdent, String role, ResourcePattern resourcePattern, + PrivBitSet privs, boolean errOnNonExist, boolean isReplay) throws DdlException { + writeLock(); + try { + if (role != null) { + // revoke privs from role + PaloRole existingRole = roleManager.revokePrivs(role, resourcePattern, privs, errOnNonExist); + if (existingRole != null) { + // revoke privs from users of this role + for (UserIdentity user : existingRole.getUsers()) { + revokePrivs(user, resourcePattern, privs, false /* err on non exist */); + } + } + } else { + revokePrivs(userIdent, resourcePattern, privs, errOnNonExist); + } + + if (!isReplay) { + PrivInfo info = new PrivInfo(userIdent, resourcePattern, privs, null, role); + 
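+                // reuses the table-revoke log entry; replayRevoke() dispatches on whether
+                // tblPattern or resourcePattern is set in the PrivInfo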
Catalog.getCurrentCatalog().getEditLog().logRevokePriv(info); + } + LOG.info("finished to revoke privilege. is replay: {}", isReplay); + } finally { + writeUnlock(); + } + } + public void revokePrivs(UserIdentity userIdent, TablePattern tblPattern, PrivBitSet privs, boolean errOnNonExist) throws DdlException { writeLock(); @@ -712,6 +886,23 @@ public void revokePrivs(UserIdentity userIdent, TablePattern tblPattern, PrivBit } } + public void revokePrivs(UserIdentity userIdent, ResourcePattern resourcePattern, PrivBitSet privs, + boolean errOnNonExist) throws DdlException { + writeLock(); + try { + switch (resourcePattern.getPrivLevel()) { + case GLOBAL: + revokeGlobalPrivs(userIdent, privs, errOnNonExist); + break; + case RESOURCE: + revokeResourcePrivs(userIdent, resourcePattern.getResourceName(), privs, errOnNonExist); + break; + } + } finally { + writeUnlock(); + } + } + // set password public void setPassword(SetPassVar stmt) throws DdlException { setPasswordInternal(stmt.getUserIdent(), stmt.getPassword(), null, true /* err on non exist */, @@ -752,7 +943,7 @@ public void setPasswordInternal(UserIdentity userIdent, byte[] password, UserIde } if (!isReplay) { - PrivInfo info = new PrivInfo(userIdent, null, null, password, null); + PrivInfo info = new PrivInfo(userIdent, null, password, null); Catalog.getCurrentCatalog().getEditLog().logSetPassword(info); } } finally { @@ -781,7 +972,7 @@ private void createRoleInternal(String role, boolean isReplay) throws DdlExcepti roleManager.addRole(emptyPrivsRole, true /* err on exist */); if (!isReplay) { - PrivInfo info = new PrivInfo(null, null, null, null, role); + PrivInfo info = new PrivInfo(null, null, null, role); Catalog.getCurrentCatalog().getEditLog().logCreateRole(info); } } finally { @@ -809,7 +1000,7 @@ private void dropRoleInternal(String role, boolean isReplay) throws DdlException roleManager.dropRole(role, true /* err on non exist */); if (!isReplay) { - PrivInfo info = new PrivInfo(null, null, null, null, role); + PrivInfo info = new PrivInfo(null, null, null, role); Catalog.getCurrentCatalog().getEditLog().logDropRole(info); } } finally { @@ -970,6 +1161,22 @@ private void getUserAuthInfo(List> userAuthInfos, UserIdentity user userAuthInfo.add(Joiner.on("; ").join(tblPrivs)); } + // resource + List resourcePrivs = Lists.newArrayList(); + for (PrivEntry entry : resourcePrivTable.entries) { + if (!entry.match(userIdent, true /* exact match */)) { + continue; + } + ResourcePrivEntry rEntry = (ResourcePrivEntry) entry; + resourcePrivs.add(rEntry.getOrigResource() + ": " + rEntry.getPrivSet().toString() + + " (" + entry.isSetByDomainResolver() + ")"); + } + if (resourcePrivs.isEmpty()) { + userAuthInfo.add("N/A"); + } else { + userAuthInfo.add(Joiner.on("; ").join(resourcePrivs)); + } + userAuthInfos.add(userAuthInfo); } @@ -993,6 +1200,12 @@ private Set getAllUserIdents(boolean includeEntrySetByResolver) { } userIdents.add(entry.getUserIdent()); } + for (PrivEntry entry : resourcePrivTable.entries) { + if (!includeEntrySetByResolver && entry.isSetByDomainResolver()) { + continue; + } + userIdents.add(entry.getUserIdent()); + } return userIdents; } @@ -1098,6 +1311,8 @@ public void write(DataOutput out) throws IOException { userPrivTable.write(out); dbPrivTable.write(out); tablePrivTable.write(out); + // TODO(wyb): spark-load + //resourcePrivTable.write(out); propertyMgr.write(out); } @@ -1106,6 +1321,12 @@ public void readFields(DataInput in) throws IOException { userPrivTable = (UserPrivTable) PrivTable.read(in); dbPrivTable = 
(DbPrivTable) PrivTable.read(in); tablePrivTable = (TablePrivTable) PrivTable.read(in); + // TODO(wyb): spark-load + /* + if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.new_version_by_wyb) { + resourcePrivTable = (ResourcePrivTable) PrivTable.read(in); + } + */ propertyMgr = UserPropertyMgr.read(in); if (userPrivTable.isEmpty()) { @@ -1120,6 +1341,7 @@ public String toString() { sb.append(userPrivTable).append("\n"); sb.append(dbPrivTable).append("\n"); sb.append(tablePrivTable).append("\n"); + sb.append(resourcePrivTable).append("\n"); sb.append(roleManager).append("\n"); sb.append(propertyMgr).append("\n"); return sb.toString(); diff --git a/fe/src/main/java/org/apache/doris/mysql/privilege/PaloPrivilege.java b/fe/src/main/java/org/apache/doris/mysql/privilege/PaloPrivilege.java index 25895c9e3a0d02..d7931151776cb4 100644 --- a/fe/src/main/java/org/apache/doris/mysql/privilege/PaloPrivilege.java +++ b/fe/src/main/java/org/apache/doris/mysql/privilege/PaloPrivilege.java @@ -25,7 +25,8 @@ public enum PaloPrivilege { LOAD_PRIV("Load_priv", 4, "Privilege for loading data into tables"), ALTER_PRIV("Alter_priv", 5, "Privilege for alter database or table"), CREATE_PRIV("Create_priv", 6, "Privilege for createing database or table"), - DROP_PRIV("Drop_priv", 7, "Privilege for dropping database or table"); + DROP_PRIV("Drop_priv", 7, "Privilege for dropping database or table"), + USAGE_PRIV("Usage_priv", 8, "Privilege for using resource"); public static PaloPrivilege[] privileges = { NODE_PRIV, @@ -35,7 +36,8 @@ public enum PaloPrivilege { LOAD_PRIV, ALTER_PRIV, CREATE_PRIV, - DROP_PRIV + DROP_PRIV, + USAGE_PRIV }; private String name; diff --git a/fe/src/main/java/org/apache/doris/mysql/privilege/PaloRole.java b/fe/src/main/java/org/apache/doris/mysql/privilege/PaloRole.java index e960c79e82c4eb..7d2f12353f270c 100644 --- a/fe/src/main/java/org/apache/doris/mysql/privilege/PaloRole.java +++ b/fe/src/main/java/org/apache/doris/mysql/privilege/PaloRole.java @@ -17,6 +17,7 @@ package org.apache.doris.mysql.privilege; +import org.apache.doris.analysis.ResourcePattern; import org.apache.doris.analysis.TablePattern; import org.apache.doris.analysis.UserIdentity; import org.apache.doris.common.io.Text; @@ -39,13 +40,16 @@ public class PaloRole implements Writable { // admin is like DBA, who has all privileges except for NODE privilege held by operator public static String ADMIN_ROLE = "admin"; - public static PaloRole OPERATOR = new PaloRole(OPERATOR_ROLE, TablePattern.ALL, - PrivBitSet.of(PaloPrivilege.NODE_PRIV, PaloPrivilege.ADMIN_PRIV)); - public static PaloRole ADMIN = new PaloRole(ADMIN_ROLE, TablePattern.ALL, - PrivBitSet.of(PaloPrivilege.ADMIN_PRIV)); + public static PaloRole OPERATOR = new PaloRole(OPERATOR_ROLE, + TablePattern.ALL, PrivBitSet.of(PaloPrivilege.NODE_PRIV, PaloPrivilege.ADMIN_PRIV), + ResourcePattern.ALL, PrivBitSet.of(PaloPrivilege.NODE_PRIV, PaloPrivilege.ADMIN_PRIV)); + public static PaloRole ADMIN = new PaloRole(ADMIN_ROLE, + TablePattern.ALL, PrivBitSet.of(PaloPrivilege.ADMIN_PRIV), + ResourcePattern.ALL, PrivBitSet.of(PaloPrivilege.ADMIN_PRIV)); private String roleName; private Map tblPatternToPrivs = Maps.newConcurrentMap(); + private Map resourcePatternToPrivs = Maps.newConcurrentMap(); // users which this role private Set users = Sets.newConcurrentHashSet(); @@ -62,6 +66,18 @@ public PaloRole(String roleName, TablePattern tablePattern, PrivBitSet privs) { this.tblPatternToPrivs.put(tablePattern, privs); } + public PaloRole(String roleName, ResourcePattern 
resourcePattern, PrivBitSet privs) { + this.roleName = roleName; + this.resourcePatternToPrivs.put(resourcePattern, privs); + } + + public PaloRole(String roleName, TablePattern tablePattern, PrivBitSet tablePrivs, + ResourcePattern resourcePattern, PrivBitSet resourcePrivs) { + this.roleName = roleName; + this.tblPatternToPrivs.put(tablePattern, tablePrivs); + this.resourcePatternToPrivs.put(resourcePattern, resourcePrivs); + } + public String getRoleName() { return roleName; } @@ -70,6 +86,10 @@ public Map getTblPatternToPrivs() { return tblPatternToPrivs; } + public Map getResourcePatternToPrivs() { + return resourcePatternToPrivs; + } + public Set getUsers() { return users; } @@ -84,6 +104,14 @@ public void merge(PaloRole other) { tblPatternToPrivs.put(entry.getKey(), entry.getValue()); } } + for (Map.Entry entry : other.resourcePatternToPrivs.entrySet()) { + if (resourcePatternToPrivs.containsKey(entry.getKey())) { + PrivBitSet existPrivs = resourcePatternToPrivs.get(entry.getKey()); + existPrivs.or(entry.getValue()); + } else { + resourcePatternToPrivs.put(entry.getKey(), entry.getValue()); + } + } } public void addUser(UserIdentity userIdent) { @@ -114,7 +142,14 @@ public void write(DataOutput out) throws IOException { entry.getKey().write(out); entry.getValue().write(out); } - + // TODO(wyb): spark-load + /* + out.writeInt(resourcePatternToPrivs.size()); + for (Map.Entry entry : resourcePatternToPrivs.entrySet()) { + entry.getKey().write(out); + entry.getValue().write(out); + } + */ out.writeInt(users.size()); for (UserIdentity userIdentity : users) { userIdentity.write(out); @@ -129,6 +164,17 @@ public void readFields(DataInput in) throws IOException { PrivBitSet privs = PrivBitSet.read(in); tblPatternToPrivs.put(tblPattern, privs); } + // TODO(wyb): spark-load + /* + if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.new_version_by_wyb) { + size = in.readInt(); + for (int i = 0; i < size; i++) { + ResourcePattern resourcePattern = ResourcePattern.read(in); + PrivBitSet privs = PrivBitSet.read(in); + resourcePatternToPrivs.put(resourcePattern, privs); + } + } + */ size = in.readInt(); for (int i = 0; i < size; i++) { UserIdentity userIdentity = UserIdentity.read(in); @@ -139,7 +185,8 @@ public void readFields(DataInput in) throws IOException { @Override public String toString() { StringBuilder sb = new StringBuilder(); - sb.append("role: ").append(roleName).append(", privs: ").append(tblPatternToPrivs); + sb.append("role: ").append(roleName).append(", db table privs: ").append(tblPatternToPrivs); + sb.append(", resource privs: ").append(resourcePatternToPrivs); sb.append(", users: ").append(users); return sb.toString(); } diff --git a/fe/src/main/java/org/apache/doris/mysql/privilege/PrivBitSet.java b/fe/src/main/java/org/apache/doris/mysql/privilege/PrivBitSet.java index fc99da06d90826..58fd169c597ad5 100644 --- a/fe/src/main/java/org/apache/doris/mysql/privilege/PrivBitSet.java +++ b/fe/src/main/java/org/apache/doris/mysql/privilege/PrivBitSet.java @@ -89,6 +89,15 @@ public boolean containsNodePriv() { return containsPrivs(PaloPrivilege.NODE_PRIV); } + public boolean containsResourcePriv() { + return containsPrivs(PaloPrivilege.USAGE_PRIV); + } + + public boolean containsDbTablePriv() { + return containsPrivs(PaloPrivilege.SELECT_PRIV, PaloPrivilege.LOAD_PRIV, PaloPrivilege.ALTER_PRIV, + PaloPrivilege.CREATE_PRIV, PaloPrivilege.DROP_PRIV); + } + public boolean containsPrivs(PaloPrivilege... 
privs) { for (PaloPrivilege priv : privs) { if (get(priv.getIdx())) { diff --git a/fe/src/main/java/org/apache/doris/mysql/privilege/PrivPredicate.java b/fe/src/main/java/org/apache/doris/mysql/privilege/PrivPredicate.java index 518d4ff9f05778..3d8d060f2425bc 100644 --- a/fe/src/main/java/org/apache/doris/mysql/privilege/PrivPredicate.java +++ b/fe/src/main/java/org/apache/doris/mysql/privilege/PrivPredicate.java @@ -27,7 +27,8 @@ public class PrivPredicate { PaloPrivilege.LOAD_PRIV, PaloPrivilege.ALTER_PRIV, PaloPrivilege.CREATE_PRIV, - PaloPrivilege.DROP_PRIV), + PaloPrivilege.DROP_PRIV, + PaloPrivilege.USAGE_PRIV), Operator.OR); // create/drop/alter/show user public static final PrivPredicate GRANT = PrivPredicate.of(PrivBitSet.of(PaloPrivilege.ADMIN_PRIV, @@ -66,6 +67,11 @@ public class PrivPredicate { public static final PrivPredicate OPERATOR = PrivPredicate.of(PrivBitSet.of(PaloPrivilege.NODE_PRIV), Operator.OR); + // resource usage + public static final PrivPredicate USAGE = PrivPredicate.of(PrivBitSet.of(PaloPrivilege.ADMIN_PRIV, + PaloPrivilege.USAGE_PRIV), + Operator.OR); + // all public static final PrivPredicate ALL = PrivPredicate.of(PrivBitSet.of(PaloPrivilege.NODE_PRIV, PaloPrivilege.ADMIN_PRIV, @@ -73,7 +79,8 @@ public class PrivPredicate { PaloPrivilege.LOAD_PRIV, PaloPrivilege.ALTER_PRIV, PaloPrivilege.CREATE_PRIV, - PaloPrivilege.DROP_PRIV), + PaloPrivilege.DROP_PRIV, + PaloPrivilege.USAGE_PRIV), Operator.OR); private PrivBitSet privs; diff --git a/fe/src/main/java/org/apache/doris/mysql/privilege/ResourcePrivEntry.java b/fe/src/main/java/org/apache/doris/mysql/privilege/ResourcePrivEntry.java new file mode 100644 index 00000000000000..0137e332ace9fd --- /dev/null +++ b/fe/src/main/java/org/apache/doris/mysql/privilege/ResourcePrivEntry.java @@ -0,0 +1,134 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
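+
+// One ResourcePrivEntry represents a single grant, keyed by (host, resource, user) patterns;
+// the resource name "*" is matched as the MySQL wildcard "%" and flagged as isAnyResource.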
+ +package org.apache.doris.mysql.privilege; + +import org.apache.doris.common.AnalysisException; +import org.apache.doris.common.CaseSensibility; +import org.apache.doris.common.PatternMatcher; +import org.apache.doris.common.io.Text; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +public class ResourcePrivEntry extends PrivEntry { + protected static final String ANY_RESOURCE = "*"; + + protected PatternMatcher resourcePattern; + protected String origResource; + protected boolean isAnyResource; + + protected ResourcePrivEntry() { + } + + protected ResourcePrivEntry(PatternMatcher hostPattern, String origHost, PatternMatcher resourcePattern, String origResource, + PatternMatcher userPattern, String user, boolean isDomain, PrivBitSet privSet) { + super(hostPattern, origHost, userPattern, user, isDomain, privSet); + this.resourcePattern = resourcePattern; + this.origResource = origResource; + if (origResource.equals(ANY_RESOURCE)) { + isAnyResource = true; + } + } + + public static ResourcePrivEntry create(String host, String resourceName, String user, boolean isDomain, PrivBitSet privs) + throws AnalysisException { + PatternMatcher hostPattern = PatternMatcher.createMysqlPattern(host, CaseSensibility.HOST.getCaseSensibility()); + PatternMatcher resourcePattern = PatternMatcher.createMysqlPattern(resourceName.equals(ANY_RESOURCE) ? "%" : resourceName, + CaseSensibility.RESOURCE.getCaseSensibility()); + PatternMatcher userPattern = PatternMatcher.createMysqlPattern(user, CaseSensibility.USER.getCaseSensibility()); + if (privs.containsNodePriv() || privs.containsDbTablePriv()) { + throw new AnalysisException("Resource privilege can not contains node or db table privileges: " + privs); + } + return new ResourcePrivEntry(hostPattern, host, resourcePattern, resourceName, userPattern, user, isDomain, privs); + } + + public PatternMatcher getResourcePattern() { + return resourcePattern; + } + + public String getOrigResource() { + return origResource; + } + + @Override + public int compareTo(PrivEntry other) { + if (!(other instanceof ResourcePrivEntry)) { + throw new ClassCastException("cannot cast " + other.getClass().toString() + " to " + this.getClass()); + } + + ResourcePrivEntry otherEntry = (ResourcePrivEntry) other; + int res = origHost.compareTo(otherEntry.origHost); + if (res != 0) { + return -res; + } + + res = origResource.compareTo(otherEntry.origResource); + if (res != 0) { + return -res; + } + + return -origUser.compareTo(otherEntry.origUser); + } + + @Override + public boolean keyMatch(PrivEntry other) { + if (!(other instanceof ResourcePrivEntry)) { + return false; + } + + ResourcePrivEntry otherEntry = (ResourcePrivEntry) other; + if (origHost.equals(otherEntry.origHost) && origUser.equals(otherEntry.origUser) + && origResource.equals(otherEntry.origResource) && isDomain == otherEntry.isDomain) { + return true; + } + return false; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("resource priv. 
host: ").append(origHost).append(", resource: ").append(origResource); + sb.append(", user: ").append(origUser); + sb.append(", priv: ").append(privSet).append(", set by resolver: ").append(isSetByDomainResolver); + return sb.toString(); + } + + @Override + public void write(DataOutput out) throws IOException { + if (!isClassNameWrote) { + String className = ResourcePrivEntry.class.getCanonicalName(); + Text.writeString(out, className); + isClassNameWrote = true; + } + super.write(out); + Text.writeString(out, origResource); + isClassNameWrote = false; + } + + public void readFields(DataInput in) throws IOException { + super.readFields(in); + origResource = Text.readString(in); + try { + resourcePattern = PatternMatcher.createMysqlPattern(origResource, CaseSensibility.RESOURCE.getCaseSensibility()); + } catch (AnalysisException e) { + throw new IOException(e); + } + isAnyResource = origResource.equals(ANY_RESOURCE); + } +} diff --git a/fe/src/main/java/org/apache/doris/mysql/privilege/ResourcePrivTable.java b/fe/src/main/java/org/apache/doris/mysql/privilege/ResourcePrivTable.java new file mode 100644 index 00000000000000..945a9dd5c3ba3c --- /dev/null +++ b/fe/src/main/java/org/apache/doris/mysql/privilege/ResourcePrivTable.java @@ -0,0 +1,73 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.mysql.privilege; + +import org.apache.doris.analysis.UserIdentity; +import org.apache.doris.common.io.Text; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.io.DataOutput; +import java.io.IOException; + +/* + * ResourcePrivTable saves all resources privs + */ +public class ResourcePrivTable extends PrivTable { + private static final Logger LOG = LogManager.getLogger(ResourcePrivTable.class); + + /* + * Return first priv which match the user@host on resourceName The returned priv will be + * saved in 'savedPrivs'. 
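+     * Only the first matching entry contributes; if nothing matches, savedPrivs is left
+     * unchanged, so callers such as checkResourceInternal() see an empty priv set.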
+ */ + public void getPrivs(UserIdentity currentUser, String resourceName, PrivBitSet savedPrivs) { + ResourcePrivEntry matchedEntry = null; + for (PrivEntry entry : entries) { + ResourcePrivEntry resourcePrivEntry = (ResourcePrivEntry) entry; + + if (!resourcePrivEntry.match(currentUser, true)) { + continue; + } + + // check resource + if (!resourcePrivEntry.getResourcePattern().match(resourceName)) { + continue; + } + + matchedEntry = resourcePrivEntry; + break; + } + if (matchedEntry == null) { + return; + } + + savedPrivs.or(matchedEntry.getPrivSet()); + } + + @Override + public void write(DataOutput out) throws IOException { + if (!isClassNameWrote) { + String className = ResourcePrivTable.class.getCanonicalName(); + Text.writeString(out, className); + isClassNameWrote = true; + } + + super.write(out); + } +} diff --git a/fe/src/main/java/org/apache/doris/mysql/privilege/RoleManager.java b/fe/src/main/java/org/apache/doris/mysql/privilege/RoleManager.java index d2dac3591ded6a..54f8767e816641 100644 --- a/fe/src/main/java/org/apache/doris/mysql/privilege/RoleManager.java +++ b/fe/src/main/java/org/apache/doris/mysql/privilege/RoleManager.java @@ -17,6 +17,7 @@ package org.apache.doris.mysql.privilege; +import org.apache.doris.analysis.ResourcePattern; import org.apache.doris.analysis.TablePattern; import org.apache.doris.analysis.UserIdentity; import org.apache.doris.common.DdlException; @@ -86,7 +87,30 @@ public PaloRole revokePrivs(String role, TablePattern tblPattern, PrivBitSet pri PrivBitSet existingPriv = map.get(tblPattern); if (existingPriv == null) { if (errOnNonExist) { - throw new DdlException(tblPattern + " does not eixst in role " + role); + throw new DdlException(tblPattern + " does not exist in role " + role); + } + return null; + } + + existingPriv.remove(privs); + return existingRole; + } + + public PaloRole revokePrivs(String role, ResourcePattern resourcePattern, PrivBitSet privs, boolean errOnNonExist) + throws DdlException { + PaloRole existingRole = roles.get(role); + if (existingRole == null) { + if (errOnNonExist) { + throw new DdlException("Role " + role + " does not exist"); + } + return null; + } + + Map map = existingRole.getResourcePatternToPrivs(); + PrivBitSet existingPriv = map.get(resourcePattern); + if (existingPriv == null) { + if (errOnNonExist) { + throw new DdlException(resourcePattern + " does not exist in role " + role); } return null; } @@ -148,6 +172,19 @@ public void getRoleInfo(List> results) { info.add(Joiner.on("; ").join(tmp)); } + // resource + tmp.clear(); + for (Map.Entry entry : role.getResourcePatternToPrivs().entrySet()) { + if (entry.getKey().getPrivLevel() == PrivLevel.RESOURCE) { + tmp.add(entry.getKey().toString() + ": " + entry.getValue().toString()); + } + } + if (tmp.isEmpty()) { + info.add("N/A"); + } else { + info.add(Joiner.on("; ").join(tmp)); + } + results.add(info); } } diff --git a/fe/src/main/java/org/apache/doris/mysql/privilege/TablePrivEntry.java b/fe/src/main/java/org/apache/doris/mysql/privilege/TablePrivEntry.java index 2fcd1e9c3f07d9..5343f2908aa5bf 100644 --- a/fe/src/main/java/org/apache/doris/mysql/privilege/TablePrivEntry.java +++ b/fe/src/main/java/org/apache/doris/mysql/privilege/TablePrivEntry.java @@ -57,8 +57,8 @@ public static TablePrivEntry create(String host, String db, String user, String PatternMatcher tblPattern = PatternMatcher.createMysqlPattern(tbl.equals(ANY_TBL) ? 
"%" : tbl, CaseSensibility.TABLE.getCaseSensibility()); - if (privs.containsNodePriv()) { - throw new AnalysisException("Table privilege can not contains global privileges: " + privs); + if (privs.containsNodePriv() || privs.containsResourcePriv()) { + throw new AnalysisException("Table privilege can not contains global or resource privileges: " + privs); } return new TablePrivEntry(hostPattern, host, dbPattern, db, userPattern, user, tblPattern, tbl, isDomain, privs); diff --git a/fe/src/main/java/org/apache/doris/persist/EditLog.java b/fe/src/main/java/org/apache/doris/persist/EditLog.java index 134899a8b42bfc..376209509dcbc3 100644 --- a/fe/src/main/java/org/apache/doris/persist/EditLog.java +++ b/fe/src/main/java/org/apache/doris/persist/EditLog.java @@ -29,6 +29,7 @@ import org.apache.doris.catalog.BrokerMgr; import org.apache.doris.catalog.Catalog; import org.apache.doris.catalog.Database; +import org.apache.doris.catalog.Resource; import org.apache.doris.catalog.Function; import org.apache.doris.catalog.FunctionSearchDesc; import org.apache.doris.cluster.BaseParam; @@ -683,6 +684,16 @@ public static void loadJournal(Catalog catalog, JournalEntity journal) { catalog.getLoadManager().replayEndLoadJob(operation); break; } + case OperationType.OP_CREATE_RESOURCE: { + final Resource resource = (Resource) journal.getData(); + catalog.getResourceMgr().replayCreateResource(resource); + break; + } + case OperationType.OP_DROP_RESOURCE: { + final String resourceName = journal.getData().toString(); + catalog.getResourceMgr().replayDropResource(resourceName); + break; + } case OperationType.OP_CREATE_SMALL_FILE: { SmallFile smallFile = (SmallFile) journal.getData(); catalog.getSmallFileMgr().replayCreateFile(smallFile); @@ -1254,6 +1265,16 @@ public void logEndLoadJob(LoadJobFinalOperation loadJobFinalOperation) { logEdit(OperationType.OP_END_LOAD_JOB, loadJobFinalOperation); } + public void logCreateResource(Resource resource) { + // TODO(wyb): spark-load + //logEdit(OperationType.OP_CREATE_RESOURCE, resource); + } + + public void logDropResource(String resourceName) { + // TODO(wyb): spark-load + //logEdit(OperationType.OP_DROP_RESOURCE, new Text(resourceName)); + } + public void logCreateSmallFile(SmallFile info) { logEdit(OperationType.OP_CREATE_SMALL_FILE, info); } diff --git a/fe/src/main/java/org/apache/doris/persist/OperationType.java b/fe/src/main/java/org/apache/doris/persist/OperationType.java index fd139cf1c53c7f..d06fcba8d30eb2 100644 --- a/fe/src/main/java/org/apache/doris/persist/OperationType.java +++ b/fe/src/main/java/org/apache/doris/persist/OperationType.java @@ -155,6 +155,8 @@ public class OperationType { public static final short OP_CREATE_LOAD_JOB = 230; // this finish op include finished and cancelled public static final short OP_END_LOAD_JOB = 231; + // update job info, used by spark load + //public static final short OP_UPDATE_LOAD_JOB = 232; // small files 251~260 public static final short OP_CREATE_SMALL_FILE = 251; @@ -172,4 +174,8 @@ public class OperationType { public static final short OP_INSTALL_PLUGIN = 270; public static final short OP_UNINSTALL_PLUGIN = 271; + + // resource 276~290 + public static final short OP_CREATE_RESOURCE = 276; + public static final short OP_DROP_RESOURCE = 277; } diff --git a/fe/src/main/java/org/apache/doris/persist/PrivInfo.java b/fe/src/main/java/org/apache/doris/persist/PrivInfo.java index 46b5a0f8437156..26b739430b73ed 100644 --- a/fe/src/main/java/org/apache/doris/persist/PrivInfo.java +++ 
b/fe/src/main/java/org/apache/doris/persist/PrivInfo.java @@ -17,6 +17,7 @@ package org.apache.doris.persist; +import org.apache.doris.analysis.ResourcePattern; import org.apache.doris.analysis.TablePattern; import org.apache.doris.analysis.UserIdentity; import org.apache.doris.common.io.Text; @@ -32,6 +33,7 @@ public class PrivInfo implements Writable { private UserIdentity userIdent; private TablePattern tblPattern; + private ResourcePattern resourcePattern; private PrivBitSet privs; private byte[] passwd; private String role; @@ -40,10 +42,30 @@ private PrivInfo() { } + public PrivInfo(UserIdentity userIdent, PrivBitSet privs, byte[] passwd, String role) { + this.userIdent = userIdent; + this.tblPattern = null; + this.resourcePattern = null; + this.privs = privs; + this.passwd = passwd; + this.role = role; + } + public PrivInfo(UserIdentity userIdent, TablePattern tablePattern, PrivBitSet privs, byte[] passwd, String role) { this.userIdent = userIdent; this.tblPattern = tablePattern; + this.resourcePattern = null; + this.privs = privs; + this.passwd = passwd; + this.role = role; + } + + public PrivInfo(UserIdentity userIdent, ResourcePattern resourcePattern, PrivBitSet privs, + byte[] passwd, String role) { + this.userIdent = userIdent; + this.tblPattern = null; + this.resourcePattern = resourcePattern; this.privs = privs; this.passwd = passwd; this.role = role; @@ -57,6 +79,10 @@ public TablePattern getTblPattern() { return tblPattern; } + public ResourcePattern getResourcePattern() { + return resourcePattern; + } + public PrivBitSet getPrivs() { return privs; } @@ -91,6 +117,16 @@ public void write(DataOutput out) throws IOException { out.writeBoolean(false); } + // TODO(wyb): spark-load + /* + if (resourcePattern != null) { + out.writeBoolean(true); + resourcePattern.write(out); + } else { + out.writeBoolean(false); + } + */ + if (privs != null) { out.writeBoolean(true); privs.write(out); @@ -123,6 +159,15 @@ public void readFields(DataInput in) throws IOException { tblPattern = TablePattern.read(in); } + // TODO(wyb): spark-load + /* + if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.new_version_by_wyb) { + if (in.readBoolean()) { + resourcePattern = ResourcePattern.read(in); + } + } + */ + if (in.readBoolean()) { privs = PrivBitSet.read(in); } diff --git a/fe/src/main/java/org/apache/doris/persist/gson/GsonUtils.java b/fe/src/main/java/org/apache/doris/persist/gson/GsonUtils.java index d136304c46718e..033ab036e839b7 100644 --- a/fe/src/main/java/org/apache/doris/persist/gson/GsonUtils.java +++ b/fe/src/main/java/org/apache/doris/persist/gson/GsonUtils.java @@ -18,9 +18,11 @@ package org.apache.doris.persist.gson; import org.apache.doris.catalog.DistributionInfo; +import org.apache.doris.catalog.Resource; import org.apache.doris.catalog.HashDistributionInfo; import org.apache.doris.catalog.RandomDistributionInfo; import org.apache.doris.catalog.ScalarType; +import org.apache.doris.catalog.SparkResource; import com.google.common.base.Preconditions; import com.google.common.collect.ArrayListMultimap; @@ -86,6 +88,11 @@ public class GsonUtils { .registerSubtype(HashDistributionInfo.class, HashDistributionInfo.class.getSimpleName()) .registerSubtype(RandomDistributionInfo.class, RandomDistributionInfo.class.getSimpleName()); + // runtime adapter for class "Resource" + private static RuntimeTypeAdapterFactory resourceTypeAdapterFactory = RuntimeTypeAdapterFactory + .of(Resource.class, "clazz") + .registerSubtype(SparkResource.class, SparkResource.class.getSimpleName()); + 
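+    // "clazz" is the extra JSON field that RuntimeTypeAdapterFactory writes, so a serialized
+    // Resource can be restored as its concrete subclass (currently only SparkResource).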
// the builder of GSON instance. // Add any other adapters if necessary. private static final GsonBuilder GSON_BUILDER = new GsonBuilder() @@ -95,7 +102,8 @@ public class GsonUtils { .registerTypeHierarchyAdapter(Multimap.class, new GuavaMultimapAdapter()) .registerTypeAdapterFactory(new PostProcessTypeAdapterFactory()) .registerTypeAdapterFactory(columnTypeAdapterFactory) - .registerTypeAdapterFactory(distributionInfoTypeAdapterFactory); + .registerTypeAdapterFactory(distributionInfoTypeAdapterFactory) + .registerTypeAdapterFactory(resourceTypeAdapterFactory); // this instance is thread-safe. public static final Gson GSON = GSON_BUILDER.create(); diff --git a/fe/src/main/java/org/apache/doris/qe/DdlExecutor.java b/fe/src/main/java/org/apache/doris/qe/DdlExecutor.java index c511a995069582..284daf6e39e9ea 100644 --- a/fe/src/main/java/org/apache/doris/qe/DdlExecutor.java +++ b/fe/src/main/java/org/apache/doris/qe/DdlExecutor.java @@ -39,6 +39,7 @@ import org.apache.doris.analysis.CreateFunctionStmt; import org.apache.doris.analysis.CreateMaterializedViewStmt; import org.apache.doris.analysis.CreateRepositoryStmt; +import org.apache.doris.analysis.CreateResourceStmt; import org.apache.doris.analysis.CreateRoleStmt; import org.apache.doris.analysis.CreateRoutineLoadStmt; import org.apache.doris.analysis.CreateTableStmt; @@ -52,6 +53,7 @@ import org.apache.doris.analysis.DropFunctionStmt; import org.apache.doris.analysis.DropMaterializedViewStmt; import org.apache.doris.analysis.DropRepositoryStmt; +import org.apache.doris.analysis.DropResourceStmt; import org.apache.doris.analysis.DropRoleStmt; import org.apache.doris.analysis.DropTableStmt; import org.apache.doris.analysis.DropUserStmt; @@ -217,6 +219,10 @@ public static void execute(Catalog catalog, DdlStmt ddlStmt, OriginStatement ori catalog.checkTablets((AdminCheckTabletsStmt) ddlStmt); } else if (ddlStmt instanceof AdminSetReplicaStatusStmt) { catalog.setReplicaStatus((AdminSetReplicaStatusStmt) ddlStmt); + } else if (ddlStmt instanceof CreateResourceStmt) { + catalog.getResourceMgr().createResource((CreateResourceStmt) ddlStmt); + } else if (ddlStmt instanceof DropResourceStmt) { + catalog.getResourceMgr().dropResource((DropResourceStmt) ddlStmt); } else { throw new DdlException("Unknown statement."); } diff --git a/fe/src/main/java/org/apache/doris/qe/ShowExecutor.java b/fe/src/main/java/org/apache/doris/qe/ShowExecutor.java index 3104cacf524a12..5372c222582b85 100644 --- a/fe/src/main/java/org/apache/doris/qe/ShowExecutor.java +++ b/fe/src/main/java/org/apache/doris/qe/ShowExecutor.java @@ -38,6 +38,7 @@ import org.apache.doris.analysis.ShowDeleteStmt; import org.apache.doris.analysis.ShowDynamicPartitionStmt; import org.apache.doris.analysis.ShowEnginesStmt; +import org.apache.doris.analysis.ShowResourcesStmt; import org.apache.doris.analysis.ShowExportStmt; import org.apache.doris.analysis.ShowFrontendsStmt; import org.apache.doris.analysis.ShowFunctionsStmt; @@ -223,6 +224,8 @@ public ShowResultSet execute() throws AnalysisException { handleShowMigrations(); } else if (stmt instanceof ShowBrokerStmt) { handleShowBroker(); + } else if (stmt instanceof ShowResourcesStmt) { + handleShowResources(); } else if (stmt instanceof ShowExportStmt) { handleShowExport(); } else if (stmt instanceof ShowBackendsStmt) { @@ -1304,6 +1307,15 @@ private void handleShowBroker() { resultSet = new ShowResultSet(showStmt.getMetaData(), rowSet); } + // Handle show resources + private void handleShowResources() { + ShowResourcesStmt showStmt = 
(ShowResourcesStmt) stmt; + List> rowSet = Catalog.getInstance().getResourceMgr().getResourcesInfo(); + + // Only success + resultSet = new ShowResultSet(showStmt.getMetaData(), rowSet); + } + private void handleShowExport() throws AnalysisException { ShowExportStmt showExportStmt = (ShowExportStmt) stmt; Catalog catalog = Catalog.getInstance(); diff --git a/fe/src/main/jflex/sql_scanner.flex b/fe/src/main/jflex/sql_scanner.flex index e6d4ea0a1a482a..819fc36b7d9470 100644 --- a/fe/src/main/jflex/sql_scanner.flex +++ b/fe/src/main/jflex/sql_scanner.flex @@ -299,6 +299,7 @@ import org.apache.doris.qe.SqlModeHelper; keywordMap.put("repositories", new Integer(SqlParserSymbols.KW_REPOSITORIES)); keywordMap.put("repository", new Integer(SqlParserSymbols.KW_REPOSITORY)); keywordMap.put("resource", new Integer(SqlParserSymbols.KW_RESOURCE)); + keywordMap.put("resources", new Integer(SqlParserSymbols.KW_RESOURCES)); keywordMap.put("restore", new Integer(SqlParserSymbols.KW_RESTORE)); keywordMap.put("resume", new Integer(SqlParserSymbols.KW_RESUME)); keywordMap.put("returns", new Integer(SqlParserSymbols.KW_RETURNS)); diff --git a/fe/src/test/java/org/apache/doris/analysis/CreateResourceStmtTest.java b/fe/src/test/java/org/apache/doris/analysis/CreateResourceStmtTest.java new file mode 100644 index 00000000000000..f26797c5230efe --- /dev/null +++ b/fe/src/test/java/org/apache/doris/analysis/CreateResourceStmtTest.java @@ -0,0 +1,84 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
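+
+// Covers CreateResourceStmt analysis: property parsing, toSql() output, and rejection
+// of resource types other than "spark".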
diff --git a/fe/src/test/java/org/apache/doris/analysis/CreateResourceStmtTest.java b/fe/src/test/java/org/apache/doris/analysis/CreateResourceStmtTest.java
new file mode 100644
index 00000000000000..f26797c5230efe
--- /dev/null
+++ b/fe/src/test/java/org/apache/doris/analysis/CreateResourceStmtTest.java
@@ -0,0 +1,84 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.analysis;
+
+import org.apache.doris.catalog.Catalog;
+import org.apache.doris.catalog.Resource;
+import org.apache.doris.common.AnalysisException;
+import org.apache.doris.common.UserException;
+import org.apache.doris.mysql.privilege.PaloAuth;
+import org.apache.doris.mysql.privilege.PrivPredicate;
+import org.apache.doris.qe.ConnectContext;
+
+import com.google.common.collect.Maps;
+import mockit.Expectations;
+import mockit.Injectable;
+import mockit.Mocked;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Map;
+
+public class CreateResourceStmtTest {
+    private Analyzer analyzer;
+    private String resourceName;
+
+    @Before()
+    public void setUp() {
+        analyzer = AccessTestUtil.fetchAdminAnalyzer(true);
+        resourceName = "spark0";
+    }
+
+    @Test
+    public void testNormal(@Mocked Catalog catalog, @Injectable PaloAuth auth) throws UserException {
+        new Expectations() {
+            {
+                catalog.getAuth();
+                result = auth;
+                auth.checkGlobalPriv((ConnectContext) any, PrivPredicate.ADMIN);
+                result = true;
+            }
+        };
+
+        Map<String, String> properties = Maps.newHashMap();
+        properties.put("type", "spark");
+        CreateResourceStmt stmt = new CreateResourceStmt(true, resourceName, properties);
+        stmt.analyze(analyzer);
+        Assert.assertEquals(resourceName, stmt.getResourceName());
+        Assert.assertEquals(Resource.ResourceType.SPARK, stmt.getResourceType());
+        Assert.assertEquals("CREATE EXTERNAL RESOURCE 'spark0' PROPERTIES(\"type\" = \"spark\")", stmt.toSql());
+    }
+
+    @Test(expected = AnalysisException.class)
+    public void testUnsupportedResourceType(@Mocked Catalog catalog, @Injectable PaloAuth auth) throws UserException {
+        new Expectations() {
+            {
+                catalog.getAuth();
+                result = auth;
+                auth.checkGlobalPriv((ConnectContext) any, PrivPredicate.ADMIN);
+                result = true;
+            }
+        };
+
+        Map<String, String> properties = Maps.newHashMap();
+        properties.put("type", "hadoop");
+        CreateResourceStmt stmt = new CreateResourceStmt(true, resourceName, properties);
+        stmt.analyze(analyzer);
+    }
+}
\ No newline at end of file
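The two tests above pin the analysis rules: the type property is mandatory, only spark is accepted at this point (hadoop fails), and toSql() renders the CREATE EXTERNAL RESOURCE form. A condensed, hypothetical sketch of that check, with an unchecked exception in place of AnalysisException:

```java
import java.util.Map;

// Hypothetical condensation of the validation CreateResourceStmtTest exercises.
// The real statement additionally requires global ADMIN privilege, mocked out above.
final class ResourceTypeCheck {
    static void checkType(Map<String, String> properties) {
        String type = properties == null ? null : properties.get("type");
        if (type == null) {
            throw new IllegalArgumentException("Resource type can't be null");
        }
        if (!type.equalsIgnoreCase("spark")) {
            // testUnsupportedResourceType expects this path for "hadoop"
            throw new IllegalArgumentException("Unsupported resource type: " + type);
        }
    }
}
```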
diff --git a/fe/src/test/java/org/apache/doris/analysis/GrantStmtTest.java b/fe/src/test/java/org/apache/doris/analysis/GrantStmtTest.java
index 9dcf7149752056..dc8e1f474479db 100644
--- a/fe/src/test/java/org/apache/doris/analysis/GrantStmtTest.java
+++ b/fe/src/test/java/org/apache/doris/analysis/GrantStmtTest.java
@@ -94,6 +94,24 @@ public void testNormal() throws AnalysisException, UserException {
         stmt.analyze(analyzer);
     }
 
+    @Test
+    public void testResourceNormal() throws UserException {
+        // TODO(wyb): spark-load
+        GrantStmt.disableGrantResource = false;
+
+        String resourceName = "spark0";
+        List<AccessPrivilege> privileges = Lists.newArrayList(AccessPrivilege.USAGE_PRIV);
+        GrantStmt stmt = new GrantStmt(new UserIdentity("testUser", "%"), null, new ResourcePattern(resourceName), privileges);
+        stmt.analyze(analyzer);
+        Assert.assertEquals(resourceName, stmt.getResourcePattern().getResourceName());
+        Assert.assertEquals(PaloAuth.PrivLevel.RESOURCE, stmt.getResourcePattern().getPrivLevel());
+
+        stmt = new GrantStmt(new UserIdentity("testUser", "%"), null, new ResourcePattern("*"), privileges);
+        stmt.analyze(analyzer);
+        Assert.assertEquals(PaloAuth.PrivLevel.GLOBAL, stmt.getResourcePattern().getPrivLevel());
+        Assert.assertEquals("GRANT Usage_priv ON RESOURCE '*' TO 'testCluster:testUser'@'%'", stmt.toSql());
+    }
+
     @Test(expected = AnalysisException.class)
     public void testUserFail() throws AnalysisException, UserException {
         GrantStmt stmt;
@@ -103,4 +121,4 @@ public void testUserFail() throws AnalysisException, UserException {
         stmt.analyze(analyzer);
         Assert.fail("No exeception throws.");
     }
-}
\ No newline at end of file
+}
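testResourceNormal asserts the scoping rule for resource grants: a concrete resource name yields a RESOURCE-level privilege, while "*" escalates to GLOBAL. Reduced to a sketch with hypothetical names:

```java
// Hypothetical sketch of the priv-level rule GrantStmtTest.testResourceNormal asserts.
enum PrivLevel { GLOBAL, RESOURCE }

final class ResourcePatternSketch {
    private final String resourceName;

    ResourcePatternSketch(String resourceName) {
        this.resourceName = resourceName;
    }

    PrivLevel getPrivLevel() {
        // "*" means every resource, so the grant is recorded at global level
        return "*".equals(resourceName) ? PrivLevel.GLOBAL : PrivLevel.RESOURCE;
    }
}
```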
diff --git a/fe/src/test/java/org/apache/doris/catalog/ResourceMgrTest.java b/fe/src/test/java/org/apache/doris/catalog/ResourceMgrTest.java
new file mode 100644
index 00000000000000..3aefd2aa9eae0d
--- /dev/null
+++ b/fe/src/test/java/org/apache/doris/catalog/ResourceMgrTest.java
@@ -0,0 +1,138 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.catalog;
+
+import org.apache.doris.analysis.AccessTestUtil;
+import org.apache.doris.analysis.Analyzer;
+import org.apache.doris.analysis.CreateResourceStmt;
+import org.apache.doris.analysis.DropResourceStmt;
+import org.apache.doris.common.DdlException;
+import org.apache.doris.common.UserException;
+import org.apache.doris.mysql.privilege.PaloAuth;
+import org.apache.doris.mysql.privilege.PrivPredicate;
+import org.apache.doris.persist.EditLog;
+import org.apache.doris.qe.ConnectContext;
+
+import com.google.common.collect.Maps;
+import mockit.Expectations;
+import mockit.Injectable;
+import mockit.Mocked;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Map;
+
+public class ResourceMgrTest {
+    private String name;
+    private String type;
+    private String master;
+    private String workingDir;
+    private String broker;
+    private Map<String, String> properties;
+    private Analyzer analyzer;
+
+    @Before
+    public void setUp() {
+        name = "spark0";
+        type = "spark";
+        master = "spark://127.0.0.1:7077";
+        workingDir = "hdfs://127.0.0.1/tmp/doris";
+        broker = "broker0";
+        properties = Maps.newHashMap();
+        properties.put("type", type);
+        properties.put("spark.master", master);
+        properties.put("spark.submit.deployMode", "cluster");
+        properties.put("working_dir", workingDir);
+        properties.put("broker", broker);
+        analyzer = AccessTestUtil.fetchAdminAnalyzer(true);
+    }
+
+    @Test
+    public void testAddDropResource(@Injectable BrokerMgr brokerMgr, @Injectable EditLog editLog,
+                                    @Mocked Catalog catalog, @Injectable PaloAuth auth) throws UserException {
+        new Expectations() {
+            {
+                catalog.getBrokerMgr();
+                result = brokerMgr;
+                brokerMgr.contaisnBroker(broker);
+                result = true;
+                catalog.getEditLog();
+                result = editLog;
+                catalog.getAuth();
+                result = auth;
+                auth.checkGlobalPriv((ConnectContext) any, PrivPredicate.ADMIN);
+                result = true;
+            }
+        };
+
+        // add
+        ResourceMgr mgr = new ResourceMgr();
+        CreateResourceStmt stmt = new CreateResourceStmt(true, name, properties);
+        stmt.analyze(analyzer);
+        Assert.assertEquals(0, mgr.getResourceNum());
+        mgr.createResource(stmt);
+        Assert.assertEquals(1, mgr.getResourceNum());
+        Assert.assertTrue(mgr.containsResource(name));
+        SparkResource resource = (SparkResource) mgr.getResource(name);
+        Assert.assertNotNull(resource);
+        Assert.assertEquals(broker, resource.getBroker());
+
+        // drop
+        DropResourceStmt dropStmt = new DropResourceStmt(name);
+        mgr.dropResource(dropStmt);
+        Assert.assertEquals(0, mgr.getResourceNum());
+    }
+
+    @Test(expected = DdlException.class)
+    public void testAddResourceExist(@Injectable BrokerMgr brokerMgr, @Mocked Catalog catalog, @Injectable PaloAuth auth)
+            throws UserException {
+        new Expectations() {
+            {
+                catalog.getBrokerMgr();
+                result = brokerMgr;
+                brokerMgr.contaisnBroker(broker);
+                result = true;
+                catalog.getAuth();
+                result = auth;
+                auth.checkGlobalPriv((ConnectContext) any, PrivPredicate.ADMIN);
+                result = true;
+            }
+        };
+
+        // add
+        ResourceMgr mgr = new ResourceMgr();
+        CreateResourceStmt stmt = new CreateResourceStmt(true, name, properties);
+        stmt.analyze(analyzer);
+        Assert.assertEquals(0, mgr.getResourceNum());
+        mgr.createResource(stmt);
+        Assert.assertEquals(1, mgr.getResourceNum());
+
+        // add again
+        mgr.createResource(stmt);
+    }
+
+    @Test(expected = DdlException.class)
+    public void testDropResourceNotExist() throws UserException {
+        // drop
+        ResourceMgr mgr = new ResourceMgr();
+        Assert.assertEquals(0, mgr.getResourceNum());
+        DropResourceStmt stmt = new DropResourceStmt(name);
+        mgr.dropResource(stmt);
+    }
+}
diff --git a/fe/src/test/java/org/apache/doris/catalog/SparkResourceTest.java b/fe/src/test/java/org/apache/doris/catalog/SparkResourceTest.java
new file mode 100644
index 00000000000000..30a1172533265c
--- /dev/null
+++ b/fe/src/test/java/org/apache/doris/catalog/SparkResourceTest.java
@@ -0,0 +1,181 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.catalog;
+
+import org.apache.doris.analysis.AccessTestUtil;
+import org.apache.doris.analysis.Analyzer;
+import org.apache.doris.analysis.CreateResourceStmt;
+//import org.apache.doris.analysis.ResourceDesc;
+import org.apache.doris.common.DdlException;
+import org.apache.doris.common.UserException;
+import org.apache.doris.common.proc.BaseProcResult;
+import org.apache.doris.mysql.privilege.PaloAuth;
+import org.apache.doris.mysql.privilege.PrivPredicate;
+import org.apache.doris.qe.ConnectContext;
+
+import com.google.common.collect.Maps;
+import mockit.Expectations;
+import mockit.Injectable;
+import mockit.Mocked;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Map;
+
+public class SparkResourceTest {
+    private String name;
+    private String type;
+    private String master;
+    private String workingDir;
+    private String broker;
+    private Map<String, String> properties;
+    private Analyzer analyzer;
+
+    @Before
+    public void setUp() {
+        name = "spark0";
+        type = "spark";
+        master = "spark://127.0.0.1:7077";
+        workingDir = "hdfs://127.0.0.1/tmp/doris";
+        broker = "broker0";
+        properties = Maps.newHashMap();
+        properties.put("type", type);
+        properties.put("spark.master", master);
+        properties.put("spark.submit.deployMode", "cluster");
+        properties.put("working_dir", workingDir);
+        properties.put("broker", broker);
+        analyzer = AccessTestUtil.fetchAdminAnalyzer(true);
+    }
+
+    @Test
+    public void testFromStmt(@Injectable BrokerMgr brokerMgr, @Mocked Catalog catalog, @Injectable PaloAuth auth)
+            throws UserException {
+        new Expectations() {
+            {
+                catalog.getBrokerMgr();
+                result = brokerMgr;
+                brokerMgr.contaisnBroker(broker);
+                result = true;
+                catalog.getAuth();
+                result = auth;
+                auth.checkGlobalPriv((ConnectContext) any, PrivPredicate.ADMIN);
+                result = true;
+            }
+        };
+
+        // master: spark, deploy_mode: cluster
+        CreateResourceStmt stmt = new CreateResourceStmt(true, name, properties);
+        stmt.analyze(analyzer);
+        SparkResource resource = (SparkResource) Resource.fromStmt(stmt);
+        Assert.assertEquals(name, resource.getName());
+        Assert.assertEquals(type, resource.getType().name().toLowerCase());
+        Assert.assertEquals(master, resource.getMaster());
+        Assert.assertEquals("cluster", resource.getDeployMode().name().toLowerCase());
+        Assert.assertEquals(workingDir, resource.getWorkingDir());
+        Assert.assertEquals(broker, resource.getBroker());
+        Assert.assertEquals(2, resource.getSparkConfigs().size());
+        Assert.assertFalse(resource.isYarnMaster());
+
+        // master: spark, deploy_mode: client
+        properties.put("spark.submit.deployMode", "client");
+        stmt = new CreateResourceStmt(true, name, properties);
+        stmt.analyze(analyzer);
+        resource = (SparkResource) Resource.fromStmt(stmt);
+        Assert.assertEquals("client", resource.getDeployMode().name().toLowerCase());
+
+        // master: yarn, deploy_mode cluster
+        properties.put("spark.master", "yarn");
+        properties.put("spark.submit.deployMode", "cluster");
+        properties.put("spark.jars", "xxx.jar,yyy.jar");
+        properties.put("spark.files", "/tmp/aaa,/tmp/bbb");
+        properties.put("spark.driver.memory", "1g");
+        properties.put("spark.hadoop.yarn.resourcemanager.address", "127.0.0.1:9999");
+        properties.put("spark.hadoop.fs.defaultFS", "hdfs://127.0.0.1:10000");
+        stmt = new CreateResourceStmt(true, name, properties);
+        stmt.analyze(analyzer);
+        resource = (SparkResource) Resource.fromStmt(stmt);
+        Assert.assertTrue(resource.isYarnMaster());
+        Map<String, String> map = resource.getSparkConfigs();
+        Assert.assertEquals(7, map.size());
+        // test getProcNodeData
+        BaseProcResult result = new BaseProcResult();
+        resource.getProcNodeData(result);
+        Assert.assertEquals(9, result.getRows().size());
+    }
+
+    /*
+    @Test
+    public void testUpdate(@Injectable BrokerMgr brokerMgr, @Mocked Catalog catalog, @Injectable PaloAuth auth)
+            throws UserException {
+        new Expectations() {
+            {
+                catalog.getBrokerMgr();
+                result = brokerMgr;
+                brokerMgr.contaisnBroker(broker);
+                result = true;
+                catalog.getAuth();
+                result = auth;
+                auth.checkGlobalPriv((ConnectContext) any, PrivPredicate.ADMIN);
+                result = true;
+            }
+        };
+
+        properties.put("spark.master", "yarn");
+        properties.put("spark.submit.deployMode", "cluster");
+        properties.put("spark.driver.memory", "1g");
+        properties.put("spark.hadoop.yarn.resourcemanager.address", "127.0.0.1:9999");
+        properties.put("spark.hadoop.fs.defaultFS", "hdfs://127.0.0.1:10000");
+        CreateResourceStmt stmt = new CreateResourceStmt(true, name, properties);
+        stmt.analyze(analyzer);
+        SparkResource resource = (SparkResource) Resource.fromStmt(stmt);
+        SparkResource copiedResource = resource.getCopiedResource();
+        Map<String, String> newProperties = Maps.newHashMap();
+        newProperties.put("spark.executor.memory", "1g");
+        newProperties.put("spark.driver.memory", "2g");
+        ResourceDesc resourceDesc = new ResourceDesc(name, newProperties);
+        copiedResource.update(resourceDesc);
+        Map<String, String> map = copiedResource.getSparkConfigs();
+        Assert.assertEquals(5, resource.getSparkConfigs().size());
+        Assert.assertEquals("1g", resource.getSparkConfigs().get("spark.driver.memory"));
+        Assert.assertEquals(6, map.size());
+        Assert.assertEquals("2g", copiedResource.getSparkConfigs().get("spark.driver.memory"));
+    }
+    */
+
+    @Test(expected = DdlException.class)
+    public void testNoBroker(@Injectable BrokerMgr brokerMgr, @Mocked Catalog catalog, @Injectable PaloAuth auth)
+            throws UserException {
+        new Expectations() {
+            {
+                catalog.getBrokerMgr();
+                result = brokerMgr;
+                brokerMgr.contaisnBroker(broker);
+                result = false;
+                catalog.getAuth();
+                result = auth;
+                auth.checkGlobalPriv((ConnectContext) any, PrivPredicate.ADMIN);
+                result = true;
+            }
+        };
+
+        CreateResourceStmt stmt = new CreateResourceStmt(true, name, properties);
+        stmt.analyze(analyzer);
+        Resource.fromStmt(stmt);
+    }
+}
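SparkResourceTest implies how SparkResource splits its properties: spark.*-prefixed keys become Spark configs (2 in the standalone case, 7 in the yarn case), working_dir and broker are held separately, and isYarnMaster() keys off spark.master. A self-contained approximation, with hypothetical field names:

```java
import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch of the property split the assertions above rely on;
// the real SparkResource also validates required keys per master/deploy mode.
final class SparkResourceSketch {
    final Map<String, String> sparkConfigs = new HashMap<>();
    final String workingDir;
    final String broker;

    SparkResourceSketch(Map<String, String> properties) {
        for (Map.Entry<String, String> entry : properties.entrySet()) {
            // everything with the "spark." prefix is passed through to Spark
            if (entry.getKey().startsWith("spark.")) {
                sparkConfigs.put(entry.getKey(), entry.getValue());
            }
        }
        workingDir = properties.get("working_dir");
        broker = properties.get("broker");
    }

    boolean isYarnMaster() {
        return "yarn".equalsIgnoreCase(sparkConfigs.get("spark.master"));
    }
}
```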
"role0"; + String resourceName = "spark0"; + ResourcePattern resourcePattern = new ResourcePattern(resourceName); + String anyResource = "*"; + ResourcePattern anyResourcePattern = new ResourcePattern(anyResource); + List usagePrivileges = Lists.newArrayList(AccessPrivilege.USAGE_PRIV); + UserDesc userDesc = new UserDesc(userIdentity, "12345", true); + // TODO(wyb): spark-load + GrantStmt.disableGrantResource = false; + + // ------ grant|revoke resource to|from user ------ + // 1. create user with no role + CreateUserStmt createUserStmt = new CreateUserStmt(false, userDesc, null); + try { + createUserStmt.analyze(analyzer); + auth.createUser(createUserStmt); + } catch (UserException e) { + e.printStackTrace(); + Assert.fail(); + } + + // 2. grant usage_priv on resource 'spark0' to 'testUser'@'%' + GrantStmt grantStmt = new GrantStmt(userIdentity, null, resourcePattern, usagePrivileges); + try { + grantStmt.analyze(analyzer); + auth.grant(grantStmt); + } catch (UserException e) { + e.printStackTrace(); + Assert.fail(); + } + Assert.assertTrue(auth.checkResourcePriv(userIdentity, resourceName, PrivPredicate.USAGE)); + Assert.assertFalse(auth.checkGlobalPriv(userIdentity, PrivPredicate.USAGE)); + + // 3. revoke usage_priv on resource 'spark0' from 'testUser'@'%' + RevokeStmt revokeStmt = new RevokeStmt(userIdentity, null, resourcePattern, usagePrivileges); + try { + revokeStmt.analyze(analyzer); + auth.revoke(revokeStmt); + } catch (UserException e) { + e.printStackTrace(); + Assert.fail(); + } + Assert.assertFalse(auth.checkResourcePriv(userIdentity, resourceName, PrivPredicate.USAGE)); + Assert.assertFalse(auth.checkGlobalPriv(userIdentity, PrivPredicate.USAGE)); + + // 4. drop user + DropUserStmt dropUserStmt = new DropUserStmt(userIdentity); + try { + dropUserStmt.analyze(analyzer); + auth.dropUser(dropUserStmt); + } catch (UserException e) { + e.printStackTrace(); + Assert.fail(); + } + + // ------ grant|revoke resource to|from role ------ + // 1. create role + CreateRoleStmt roleStmt = new CreateRoleStmt(role); + try { + roleStmt.analyze(analyzer); + auth.createRole(roleStmt); + } catch (UserException e1) { + e1.printStackTrace(); + Assert.fail(); + } + // grant usage_priv on resource 'spark0' to role 'role0' + grantStmt = new GrantStmt(null, role, resourcePattern, usagePrivileges); + try { + grantStmt.analyze(analyzer); + auth.grant(grantStmt); + } catch (UserException e1) { + e1.printStackTrace(); + Assert.fail(); + } + + // 2. create user with role + createUserStmt = new CreateUserStmt(false, userDesc, role); + try { + createUserStmt.analyze(analyzer); + auth.createUser(createUserStmt); + } catch (UserException e) { + e.printStackTrace(); + Assert.fail(); + } + Assert.assertTrue(auth.checkResourcePriv(userIdentity, resourceName, PrivPredicate.USAGE)); + Assert.assertFalse(auth.checkGlobalPriv(userIdentity, PrivPredicate.USAGE)); + + // 3. revoke usage_priv on resource 'spark0' from role 'role0' + revokeStmt = new RevokeStmt(null, role, resourcePattern, usagePrivileges); + try { + revokeStmt.analyze(analyzer); + auth.revoke(revokeStmt); + } catch (UserException e) { + e.printStackTrace(); + Assert.fail(); + } + // also revoke from user with this role + Assert.assertFalse(auth.checkResourcePriv(userIdentity, resourceName, PrivPredicate.USAGE)); + Assert.assertFalse(auth.checkGlobalPriv(userIdentity, PrivPredicate.USAGE)); + + // 4. 
+        // 4. drop user and role
+        dropUserStmt = new DropUserStmt(userIdentity);
+        try {
+            dropUserStmt.analyze(analyzer);
+            auth.dropUser(dropUserStmt);
+        } catch (UserException e) {
+            e.printStackTrace();
+            Assert.fail();
+        }
+        DropRoleStmt dropRoleStmt = new DropRoleStmt(role);
+        try {
+            dropRoleStmt.analyze(analyzer);
+            auth.dropRole(dropRoleStmt);
+        } catch (UserException e) {
+            e.printStackTrace();
+            Assert.fail();
+        }
+
+        // ------ grant|revoke any resource to|from user ------
+        // 1. create user with no role
+        createUserStmt = new CreateUserStmt(false, userDesc, null);
+        try {
+            createUserStmt.analyze(analyzer);
+            auth.createUser(createUserStmt);
+        } catch (UserException e) {
+            e.printStackTrace();
+            Assert.fail();
+        }
+
+        // 2. grant usage_priv on resource '*' to 'testUser'@'%'
+        grantStmt = new GrantStmt(userIdentity, null, anyResourcePattern, usagePrivileges);
+        try {
+            grantStmt.analyze(analyzer);
+            auth.grant(grantStmt);
+        } catch (UserException e) {
+            e.printStackTrace();
+            Assert.fail();
+        }
+        Assert.assertTrue(auth.checkResourcePriv(userIdentity, resourceName, PrivPredicate.USAGE));
+        Assert.assertTrue(auth.checkGlobalPriv(userIdentity, PrivPredicate.USAGE));
+
+        // 3. revoke usage_priv on resource '*' from 'testUser'@'%'
+        revokeStmt = new RevokeStmt(userIdentity, null, anyResourcePattern, usagePrivileges);
+        try {
+            revokeStmt.analyze(analyzer);
+            auth.revoke(revokeStmt);
+        } catch (UserException e) {
+            e.printStackTrace();
+            Assert.fail();
+        }
+        Assert.assertFalse(auth.checkResourcePriv(userIdentity, resourceName, PrivPredicate.USAGE));
+        Assert.assertFalse(auth.checkGlobalPriv(userIdentity, PrivPredicate.USAGE));
+
+        // 4. drop user
+        dropUserStmt = new DropUserStmt(userIdentity);
+        try {
+            dropUserStmt.analyze(analyzer);
+            auth.dropUser(dropUserStmt);
+        } catch (UserException e) {
+            e.printStackTrace();
+            Assert.fail();
+        }
+
+        // ------ grant|revoke any resource to|from role ------
+        // 1. create role
+        roleStmt = new CreateRoleStmt(role);
+        try {
+            roleStmt.analyze(analyzer);
+            auth.createRole(roleStmt);
+        } catch (UserException e1) {
+            e1.printStackTrace();
+            Assert.fail();
+        }
+        // grant usage_priv on resource '*' to role 'role0'
+        grantStmt = new GrantStmt(null, role, anyResourcePattern, usagePrivileges);
+        try {
+            grantStmt.analyze(analyzer);
+            auth.grant(grantStmt);
+        } catch (UserException e1) {
+            e1.printStackTrace();
+            Assert.fail();
+        }
+
+        // 2. create user with role
+        createUserStmt = new CreateUserStmt(false, userDesc, role);
+        try {
+            createUserStmt.analyze(analyzer);
+            auth.createUser(createUserStmt);
+        } catch (UserException e) {
+            e.printStackTrace();
+            Assert.fail();
+        }
+        Assert.assertTrue(auth.checkResourcePriv(userIdentity, resourceName, PrivPredicate.USAGE));
+        Assert.assertTrue(auth.checkGlobalPriv(userIdentity, PrivPredicate.USAGE));
+
+        // 3. revoke usage_priv on resource '*' from role 'role0'
+        revokeStmt = new RevokeStmt(null, role, anyResourcePattern, usagePrivileges);
+        try {
+            revokeStmt.analyze(analyzer);
+            auth.revoke(revokeStmt);
+        } catch (UserException e) {
+            e.printStackTrace();
+            Assert.fail();
+        }
+        // also revoke from user with this role
+        Assert.assertFalse(auth.checkResourcePriv(userIdentity, resourceName, PrivPredicate.USAGE));
+        Assert.assertFalse(auth.checkGlobalPriv(userIdentity, PrivPredicate.USAGE));
+        // 4. drop user and role
+        dropUserStmt = new DropUserStmt(userIdentity);
+        try {
+            dropUserStmt.analyze(analyzer);
+            auth.dropUser(dropUserStmt);
+        } catch (UserException e) {
+            e.printStackTrace();
+            Assert.fail();
+        }
+        dropRoleStmt = new DropRoleStmt(role);
+        try {
+            dropRoleStmt.analyze(analyzer);
+            auth.dropRole(dropRoleStmt);
+        } catch (UserException e) {
+            e.printStackTrace();
+            Assert.fail();
+        }
+
+        // ------ error case ------
+        boolean hasException = false;
+        createUserStmt = new CreateUserStmt(false, userDesc, null);
+        try {
+            createUserStmt.analyze(analyzer);
+            auth.createUser(createUserStmt);
+        } catch (UserException e) {
+            e.printStackTrace();
+            Assert.fail();
+        }
+
+        // 1. grant db table priv to resource
+        List<AccessPrivilege> privileges = Lists.newArrayList(AccessPrivilege.SELECT_PRIV);
+        grantStmt = new GrantStmt(userIdentity, null, resourcePattern, privileges);
+        hasException = false;
+        try {
+            grantStmt.analyze(analyzer);
+            auth.grant(grantStmt);
+        } catch (UserException e) {
+            e.printStackTrace();
+            hasException = true;
+        }
+        Assert.assertTrue(hasException);
+
+        // 2. grant resource priv to db table
+        TablePattern tablePattern = new TablePattern("db1", "*");
+        grantStmt = new GrantStmt(userIdentity, null, tablePattern, usagePrivileges);
+        hasException = false;
+        try {
+            grantStmt.analyze(analyzer);
+            auth.grant(grantStmt);
+        } catch (UserException e) {
+            e.printStackTrace();
+            hasException = true;
+        }
+        Assert.assertTrue(hasException);
+    }
 }
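Taken together, AuthTest.testResource fixes the USAGE semantics: a grant on one named resource covers only that resource, while a grant on "*" also satisfies global USAGE checks, whether it arrives directly or through a role. Reduced to a sketch (the real PaloAuth additionally resolves roles, hosts, and privilege bitmaps):

```java
import java.util.HashSet;
import java.util.Set;

// Hypothetical reduction of the check semantics the test pins down.
final class UsagePrivSketch {
    private final Set<String> resourceGrants = new HashSet<>(); // per-resource USAGE
    private boolean globalUsage = false;                        // USAGE granted on "*"

    void grant(String resourcePattern) {
        if ("*".equals(resourcePattern)) {
            globalUsage = true;
        } else {
            resourceGrants.add(resourcePattern);
        }
    }

    boolean checkResourcePriv(String resourceName) {
        // a global USAGE grant covers every resource
        return globalUsage || resourceGrants.contains(resourceName);
    }

    boolean checkGlobalPriv() {
        // only the "*" grant satisfies a global check, matching the asserts above
        return globalUsage;
    }
}
```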