@@ -81,6 +81,7 @@ statement
     (PARTITIONED BY partitionColumnNames=identifierList)?
     bucketSpec? locationSpec?
     (COMMENT comment=STRING)?
+    (TBLPROPERTIES tableProps=tablePropertyList)?
     (AS? query)? #createTable
     | createTableHeader ('(' columns=colTypeList ')')?
     (COMMENT comment=STRING)?
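
For reference, here is a minimal sketch of a statement the extended rule now accepts, run through a SparkSession (the table and property names are made up):

    // Minimal example of the data-source CREATE TABLE syntax with the new
    // TBLPROPERTIES clause; table and property names are hypothetical.
    spark.sql("""
      CREATE TABLE my_tab (a INT, b STRING)
      USING parquet
      COMMENT 'table_comment'
      TBLPROPERTIES ('k1' = 'v1', 'k2' = 'v2')
    """)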
@@ -385,7 +385,8 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder(conf) {
* ]
* [LOCATION path]
* [COMMENT table_comment]
- * [AS select_statement];
+ * [TBLPROPERTIES (property_name=property_value, ...)]
+ * [[AS] select_statement];
* }}}
*/
override def visitCreateTable(ctx: CreateTableContext): LogicalPlan = withOrigin(ctx) {
@@ -400,6 +401,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder(conf) {
Option(ctx.partitionColumnNames)
.map(visitIdentifierList(_).toArray)
.getOrElse(Array.empty[String])
+    val properties = Option(ctx.tableProps).map(visitPropertyKeyValues).getOrElse(Map.empty)
val bucketSpec = Option(ctx.bucketSpec()).map(visitBucketSpec)

val location = Option(ctx.locationSpec).map(visitLocationSpec)
@@ -410,7 +412,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder(conf) {
"LOCATION and 'path' in OPTIONS are both used to indicate the custom table path, " +
"you can only specify one of them.", ctx)
}
-    val customLocation = storage.locationUri.orElse(location.map(CatalogUtils.stringToURI(_)))
+    val customLocation = storage.locationUri.orElse(location.map(CatalogUtils.stringToURI))

val tableType = if (customLocation.isDefined) {
CatalogTableType.EXTERNAL
@@ -426,6 +428,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder(conf) {
provider = Some(provider),
partitionColumnNames = partitionColumnNames,
bucketSpec = bucketSpec,
+      properties = properties,
comment = Option(ctx.comment).map(string))

// Determine the storage mode.
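
A hedged sketch of how the new clause surfaces on the parsed plan, assuming a test-style setup that can reach sessionState (the suites below live in the org.apache.spark.sql package, where this access works):

    // visitPropertyKeyValues turns TBLPROPERTIES ('k' = 'v') into a
    // Map[String, String] that lands on CatalogTable.properties.
    val plan = spark.sessionState.sqlParser.parsePlan(
      "CREATE TABLE t (a INT) USING parquet TBLPROPERTIES ('k' = 'v')")
    // plan should be a CreateTable node whose tableDesc.properties == Map("k" -> "v")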
sql/core/src/test/resources/sql-tests/inputs/describe.sql (3 changes: 2 additions & 1 deletion)
@@ -1,7 +1,8 @@
CREATE TABLE t (a STRING, b INT, c STRING, d STRING) USING parquet
OPTIONS (a '1', b '2')
PARTITIONED BY (c, d) CLUSTERED BY (a) SORTED BY (b ASC) INTO 2 BUCKETS
-COMMENT 'table_comment';
+COMMENT 'table_comment'
+TBLPROPERTIES (t 'test');

CREATE TEMPORARY VIEW temp_v AS SELECT * FROM t;

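
To see the effect of the new clause interactively, a sketch assuming the table t above exists in the current session:

    // DESC EXTENDED now reports a "Table Properties" row including t=test,
    // as the updated describe.sql.out below shows.
    spark.sql("DESC EXTENDED t").show(numRows = 100, truncate = false)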
@@ -128,6 +128,7 @@ two 2 two 2 one 1 two 2
two 2 two 2 three 3 two 2
two 2 two 2 two 2 two 2


@gatorsmile (Member, Author) commented on Aug 31, 2017:
This blank line was automatically added when I regenerated the results with SQLQueryTestSuite.
-- !query 12
SELECT * FROM nt1 CROSS JOIN nt2 ON (nt1.k > nt2.k)
-- !query 12 schema
@@ -7,6 +7,7 @@ CREATE TABLE t (a STRING, b INT, c STRING, d STRING) USING parquet
OPTIONS (a '1', b '2')
PARTITIONED BY (c, d) CLUSTERED BY (a) SORTED BY (b ASC) INTO 2 BUCKETS
COMMENT 'table_comment'
+TBLPROPERTIES (t 'test')
-- !query 0 schema
struct<>
-- !query 0 output
@@ -129,7 +130,7 @@ Num Buckets 2
Bucket Columns [`a`]
Sort Columns [`b`]
Comment table_comment
-Table Properties [e=3]
+Table Properties [t=test, e=3]
Location [not included in comparison]sql/core/spark-warehouse/t
Storage Properties [a=1, b=2]
Partition Provider Catalog
@@ -161,7 +162,7 @@ Num Buckets 2
Bucket Columns [`a`]
Sort Columns [`b`]
Comment table_comment
-Table Properties [e=3]
+Table Properties [t=test, e=3]
Location [not included in comparison]sql/core/spark-warehouse/t
Storage Properties [a=1, b=2]
Partition Provider Catalog
@@ -201,6 +202,7 @@ Num Buckets 2
Bucket Columns [`a`]
Sort Columns [`b`]
Comment table_comment
+Table Properties [t=test]
Location [not included in comparison]sql/core/spark-warehouse/t
Storage Properties [a=1, b=2]
Partition Provider Catalog
@@ -239,6 +241,7 @@ Provider parquet
Num Buckets 2
Bucket Columns [`a`]
Sort Columns [`b`]
+Table Properties [t=test]
Location [not included in comparison]sql/core/spark-warehouse/t
Storage Properties [a=1, b=2]
Partition Provider Catalog
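
The pre-existing e=3 entry that merges into [t=test, e=3] in the first two hunks above presumably comes from an ALTER TABLE ... SET TBLPROPERTIES statement elsewhere in describe.sql, which this diff does not touch. A sketch of the assumed pattern:

    // Assumption: something along these lines appears later in describe.sql.
    spark.sql("ALTER TABLE t SET TBLPROPERTIES (e = '3')")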
@@ -117,4 +117,12 @@ class OptimizeMetadataOnlyQuerySuite extends QueryTest with SharedSQLContext {
"select partcol1, max(partcol2) from srcpart where partcol1 = 0 group by rollup (partcol1)",
"select partcol2 from (select partcol2 from srcpart where partcol1 = 0 union all " +
"select partcol2 from srcpart where partcol1 = 1) t group by partcol2")

test("SPARK-21884 Fix StackOverflowError on MetadataOnlyQuery") {
@gatorsmile (Member, Author) commented:
Here, I just add a test case from #19094 to verify the fix in #18686.

@dongjoon-hyun (Member) commented on Sep 1, 2017:
+1 for preventing future regressions! Thanks.

withTable("t_1000") {
sql("CREATE TABLE t_1000 (a INT, p INT) USING PARQUET PARTITIONED BY (p)")
(1 to 1000).foreach(p => sql(s"ALTER TABLE t_1000 ADD PARTITION (p=$p)"))
sql("SELECT COUNT(DISTINCT p) FROM t_1000").collect()
}
}
}
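
For context, OptimizeMetadataOnlyQuery answers queries that touch only partition columns from catalog metadata instead of scanning files. A sketch of the regression scenario in a spark-shell, assuming the default spark.sql.optimizer.metadataOnly=true:

    // 1,000 catalog partitions; no data files are needed for this query.
    spark.sql("CREATE TABLE t_1000 (a INT, p INT) USING PARQUET PARTITIONED BY (p)")
    (1 to 1000).foreach(p => spark.sql(s"ALTER TABLE t_1000 ADD PARTITION (p=$p)"))
    // Answered from the partition list; this used to throw StackOverflowError.
    spark.sql("SELECT COUNT(DISTINCT p) FROM t_1000").show()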
@@ -473,6 +473,26 @@ class DDLParserSuite extends PlanTest with SharedSQLContext {
}
}

test("create table - with table properties") {
val sql = "CREATE TABLE my_tab(a INT, b STRING) USING parquet TBLPROPERTIES('test' = 'test')"

val expectedTableDesc = CatalogTable(
identifier = TableIdentifier("my_tab"),
tableType = CatalogTableType.MANAGED,
storage = CatalogStorageFormat.empty,
schema = new StructType().add("a", IntegerType).add("b", StringType),
provider = Some("parquet"),
properties = Map("test" -> "test"))

parser.parsePlan(sql) match {
case CreateTable(tableDesc, _, None) =>
assert(tableDesc == expectedTableDesc.copy(createTime = tableDesc.createTime))
case other =>
fail(s"Expected to parse ${classOf[CreateTableCommand].getClass.getName} from query," +
s"got ${other.getClass.getName}: $sql")
}
}

test("create table - with location") {
val v1 = "CREATE TABLE my_tab(a INT, b STRING) USING parquet LOCATION '/tmp/file'"
