diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala index 71e23175168e..c1ec736c32ed 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala @@ -104,12 +104,12 @@ case class UnresolvedAttribute(nameParts: Seq[String]) extends Attribute with Un override def exprId: ExprId = throw new UnresolvedException(this, "exprId") override def dataType: DataType = throw new UnresolvedException(this, "dataType") override def nullable: Boolean = throw new UnresolvedException(this, "nullable") - override def qualifier: Option[String] = throw new UnresolvedException(this, "qualifier") + override def qualifier: Seq[String] = throw new UnresolvedException(this, "qualifier") override lazy val resolved = false override def newInstance(): UnresolvedAttribute = this override def withNullability(newNullability: Boolean): UnresolvedAttribute = this - override def withQualifier(newQualifier: Option[String]): UnresolvedAttribute = this + override def withQualifier(newQualifier: Seq[String]): UnresolvedAttribute = this override def withName(newName: String): UnresolvedAttribute = UnresolvedAttribute.quoted(newName) override def withMetadata(newMetadata: Metadata): Attribute = this @@ -240,7 +240,7 @@ abstract class Star extends LeafExpression with NamedExpression { override def exprId: ExprId = throw new UnresolvedException(this, "exprId") override def dataType: DataType = throw new UnresolvedException(this, "dataType") override def nullable: Boolean = throw new UnresolvedException(this, "nullable") - override def qualifier: Option[String] = throw new UnresolvedException(this, "qualifier") + override def qualifier: Seq[String] = throw new UnresolvedException(this, "qualifier") override def toAttribute: Attribute = throw new UnresolvedException(this, "toAttribute") override def newInstance(): NamedExpression = throw new UnresolvedException(this, "newInstance") override lazy val resolved = false @@ -262,17 +262,46 @@ abstract class Star extends LeafExpression with NamedExpression { */ case class UnresolvedStar(target: Option[Seq[String]]) extends Star with Unevaluable { - override def expand(input: LogicalPlan, resolver: Resolver): Seq[NamedExpression] = { + /** + * Returns true if the nameParts match the qualifier of the attribute. + * + * There are two checks: i) Check if the nameParts match the qualifier fully. + * E.g. SELECT db1.t1.* FROM db1.t1. In this case, nameParts is Seq("db1", "t1") and the + * qualifier of the attribute is Seq("db1", "t1"). + * ii) If (i) is not true, then check if nameParts has only a single element and it + * matches the table portion of the qualifier. + * + * E.g.
SELECT t1.* FROM db1.t1 In this case nameParts is Seq("t1") and + * qualifier is Seq("db1","t1") + * SELECT a.* FROM db1.t1 AS a + * In this case nameParts is Seq("a") and qualifier for + * attribute is Seq("a") + */ + private def matchedQualifier( + attribute: Attribute, + nameParts: Seq[String], + resolver: Resolver): Boolean = { + val qualifierList = attribute.qualifier + + val matched = nameParts.corresponds(qualifierList)(resolver) || { + // check if it matches the table portion of the qualifier + if (nameParts.length == 1 && qualifierList.nonEmpty) { + resolver(nameParts.head, qualifierList.last) + } else { + false + } + } + matched + } + + override def expand( + input: LogicalPlan, + resolver: Resolver): Seq[NamedExpression] = { // If there is no table specified, use all input attributes. if (target.isEmpty) return input.output - val expandedAttributes = - if (target.get.size == 1) { - // If there is a table, pick out attributes that are part of this table. - input.output.filter(_.qualifier.exists(resolver(_, target.get.head))) - } else { - List() - } + val expandedAttributes = input.output.filter(matchedQualifier(_, target.get, resolver)) + if (expandedAttributes.nonEmpty) return expandedAttributes // Try to resolve it as a struct expansion. If there is a conflict and both are possible, @@ -316,8 +345,8 @@ case class UnresolvedRegex(regexPattern: String, table: Option[String], caseSens // If there is no table specified, use all input attributes that match expr case None => input.output.filter(_.name.matches(pattern)) // If there is a table, pick out attributes that are part of this table that match expr - case Some(t) => input.output.filter(_.qualifier.exists(resolver(_, t))) - .filter(_.name.matches(pattern)) + case Some(t) => input.output.filter(a => a.qualifier.nonEmpty && + resolver(a.qualifier.last, t)).filter(_.name.matches(pattern)) } } @@ -345,7 +374,7 @@ case class MultiAlias(child: Expression, names: Seq[String]) override def nullable: Boolean = throw new UnresolvedException(this, "nullable") - override def qualifier: Option[String] = throw new UnresolvedException(this, "qualifier") + override def qualifier: Seq[String] = throw new UnresolvedException(this, "qualifier") override def toAttribute: Attribute = throw new UnresolvedException(this, "toAttribute") @@ -403,7 +432,7 @@ case class UnresolvedAlias( extends UnaryExpression with NamedExpression with Unevaluable { override def toAttribute: Attribute = throw new UnresolvedException(this, "toAttribute") - override def qualifier: Option[String] = throw new UnresolvedException(this, "qualifier") + override def qualifier: Seq[String] = throw new UnresolvedException(this, "qualifier") override def exprId: ExprId = throw new UnresolvedException(this, "exprId") override def nullable: Boolean = throw new UnresolvedException(this, "nullable") override def dataType: DataType = throw new UnresolvedException(this, "dataType") diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala index 2f60eb30f724..cd243b87652f 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala @@ -684,6 +684,7 @@ class SessionCatalog( * * If the relation is a view, we generate a [[View]] operator from the view description, and * wrap the logical plan in a [[SubqueryAlias]] which will track 
the name of the view. + * [[SubqueryAlias]] will also keep track of the name and, optionally, the database of the table/view. * * @param name The name of the table/view that we look up. */ @@ -693,7 +694,7 @@ class SessionCatalog( val table = formatTableName(name.table) if (db == globalTempViewManager.database) { globalTempViewManager.get(table).map { viewDef => - SubqueryAlias(table, viewDef) + SubqueryAlias(table, db, viewDef) }.getOrElse(throw new NoSuchTableException(db, table)) } else if (name.database.isDefined || !tempViews.contains(table)) { val metadata = externalCatalog.getTable(db, table) @@ -706,9 +707,9 @@ class SessionCatalog( desc = metadata, output = metadata.schema.toAttributes, child = parser.parsePlan(viewText)) - SubqueryAlias(table, child) + SubqueryAlias(table, db, child) } else { - SubqueryAlias(table, UnresolvedCatalogRelation(metadata)) + SubqueryAlias(table, db, UnresolvedCatalogRelation(metadata)) } } else { SubqueryAlias(table, tempViews(table)) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/higherOrderFunctions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/higherOrderFunctions.scala index e15225ffbd2d..e95d98fda84c 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/higherOrderFunctions.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/higherOrderFunctions.scala @@ -40,13 +40,13 @@ case class NamedLambdaVariable( with NamedExpression with CodegenFallback { - override def qualifier: Option[String] = None + override def qualifier: Seq[String] = Seq.empty override def newInstance(): NamedExpression = copy(value = new AtomicReference(), exprId = NamedExpression.newExprId) override def toAttribute: Attribute = { - AttributeReference(name, dataType, nullable, Metadata.empty)(exprId, None) + AttributeReference(name, dataType, nullable, Metadata.empty)(exprId, Seq.empty) } override def eval(input: InternalRow): Any = value.get diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/namedExpressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/namedExpressions.scala index ce5c2804d08e..584a2946bd56 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/namedExpressions.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/namedExpressions.scala @@ -71,19 +71,22 @@ trait NamedExpression extends Expression { * multiple qualifiers, it is possible that there are other possible way to refer to this * attribute. */ - def qualifiedName: String = (qualifier.toSeq :+ name).mkString(".") + def qualifiedName: String = (qualifier :+ name).mkString(".") /** * Optional qualifier for the expression. + * The qualifier can also contain fully qualified information, e.g. a sequence of strings + * containing the database name and the table name. * * For now, since we do not allow using original table name to qualify a column name once the * table is aliased, this can only be: * * 1. Empty Seq: when an attribute doesn't have a qualifier, * e.g. top level attributes aliased in the SELECT clause, or column from a LocalRelation. - * 2. Single element: either the table name or the alias name of the table. + * 2. Seq with a single element: either the table name or the alias name of the table. + * 3.
Seq with 2 elements: database name and table name */ - def qualifier: Option[String] + def qualifier: Seq[String] def toAttribute: Attribute @@ -109,7 +112,7 @@ abstract class Attribute extends LeafExpression with NamedExpression with NullIn override def references: AttributeSet = AttributeSet(this) def withNullability(newNullability: Boolean): Attribute - def withQualifier(newQualifier: Option[String]): Attribute + def withQualifier(newQualifier: Seq[String]): Attribute def withName(newName: String): Attribute def withMetadata(newMetadata: Metadata): Attribute @@ -130,14 +133,14 @@ abstract class Attribute extends LeafExpression with NamedExpression with NullIn * @param name The name to be associated with the result of computing [[child]]. * @param exprId A globally unique id used to check if an [[AttributeReference]] refers to this * alias. Auto-assigned if left blank. - * @param qualifier An optional string that can be used to referred to this attribute in a fully - * qualified way. Consider the examples tableName.name, subQueryAlias.name. - * tableName and subQueryAlias are possible qualifiers. + * @param qualifier An optional Seq of string that can be used to refer to this attribute in a + * fully qualified way. Consider the examples tableName.name, subQueryAlias.name. + * tableName and subQueryAlias are possible qualifiers. * @param explicitMetadata Explicit metadata associated with this alias that overwrites child's. */ case class Alias(child: Expression, name: String)( val exprId: ExprId = NamedExpression.newExprId, - val qualifier: Option[String] = None, + val qualifier: Seq[String] = Seq.empty, val explicitMetadata: Option[Metadata] = None) extends UnaryExpression with NamedExpression { @@ -201,7 +204,7 @@ case class Alias(child: Expression, name: String)( } override def sql: String = { - val qualifierPrefix = qualifier.map(_ + ".").getOrElse("") + val qualifierPrefix = if (qualifier.nonEmpty) qualifier.mkString(".") + "." else "" s"${child.sql} AS $qualifierPrefix${quoteIdentifier(name)}" } } @@ -225,9 +228,11 @@ case class AttributeReference( nullable: Boolean = true, override val metadata: Metadata = Metadata.empty)( val exprId: ExprId = NamedExpression.newExprId, - val qualifier: Option[String] = None) + val qualifier: Seq[String] = Seq.empty[String]) extends Attribute with Unevaluable { + // currently can only handle qualifier of length 2 + require(qualifier.length <= 2) /** * Returns true iff the expression id is the same for both attributes. */ @@ -286,7 +291,7 @@ case class AttributeReference( /** * Returns a copy of this [[AttributeReference]] with new qualifier. */ - override def withQualifier(newQualifier: Option[String]): AttributeReference = { + override def withQualifier(newQualifier: Seq[String]): AttributeReference = { if (newQualifier == qualifier) { this } else { @@ -324,7 +329,7 @@ case class AttributeReference( override def simpleString: String = s"$name#${exprId.id}: ${dataType.simpleString}" override def sql: String = { - val qualifierPrefix = qualifier.map(_ + ".").getOrElse("") + val qualifierPrefix = if (qualifier.nonEmpty) qualifier.mkString(".") + "." 
else "" s"$qualifierPrefix${quoteIdentifier(name)}" } } @@ -350,12 +355,12 @@ case class PrettyAttribute( override def withNullability(newNullability: Boolean): Attribute = throw new UnsupportedOperationException override def newInstance(): Attribute = throw new UnsupportedOperationException - override def withQualifier(newQualifier: Option[String]): Attribute = + override def withQualifier(newQualifier: Seq[String]): Attribute = throw new UnsupportedOperationException override def withName(newName: String): Attribute = throw new UnsupportedOperationException override def withMetadata(newMetadata: Metadata): Attribute = throw new UnsupportedOperationException - override def qualifier: Option[String] = throw new UnsupportedOperationException + override def qualifier: Seq[String] = throw new UnsupportedOperationException override def exprId: ExprId = throw new UnsupportedOperationException override def nullable: Boolean = true } @@ -371,7 +376,7 @@ case class OuterReference(e: NamedExpression) override def prettyName: String = "outer" override def name: String = e.name - override def qualifier: Option[String] = e.qualifier + override def qualifier: Seq[String] = e.qualifier override def exprId: ExprId = e.exprId override def toAttribute: Attribute = e.toAttribute override def newInstance(): NamedExpression = OuterReference(e.newInstance()) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/package.scala index 8a06daa37132..11dcc3ebf798 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/package.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/package.scala @@ -152,10 +152,22 @@ package object expressions { unique(attrs.groupBy(_.name.toLowerCase(Locale.ROOT))) } - /** Map to use for qualified case insensitive attribute lookups. */ - @transient private val qualified: Map[(String, String), Seq[Attribute]] = { - val grouped = attrs.filter(_.qualifier.isDefined).groupBy { a => - (a.qualifier.get.toLowerCase(Locale.ROOT), a.name.toLowerCase(Locale.ROOT)) + /** Map to use for qualified case insensitive attribute lookups with 2 part key */ + @transient private lazy val qualified: Map[(String, String), Seq[Attribute]] = { + // key is 2 part: table/alias and name + val grouped = attrs.filter(_.qualifier.nonEmpty).groupBy { + a => (a.qualifier.last.toLowerCase(Locale.ROOT), a.name.toLowerCase(Locale.ROOT)) + } + unique(grouped) + } + + /** Map to use for qualified case insensitive attribute lookups with 3 part key */ + @transient private val qualified3Part: Map[(String, String, String), Seq[Attribute]] = { + // key is 3 part: database name, table name and name + val grouped = attrs.filter(_.qualifier.length == 2).groupBy { a => + (a.qualifier.head.toLowerCase(Locale.ROOT), + a.qualifier.last.toLowerCase(Locale.ROOT), + a.name.toLowerCase(Locale.ROOT)) } unique(grouped) } @@ -169,25 +181,48 @@ package object expressions { }) } - // Find matches for the given name assuming that the 1st part is a qualifier (i.e. table name, - // alias, or subquery alias) and the 2nd part is the actual name. This returns a tuple of + // Find matches for the given name assuming that the 1st two parts are qualifier + // (i.e. database name and table name) and the 3rd part is the actual column name. 
+ // + // For example, suppose "db1" is the database name, "a" is the table name, + // "b" is the column name, and "c" is the struct field name. + // If the name parts are db1.a.b.c, then the matched attribute will be + // Attribute(b, qualifier = Seq("db1", "a")), and List("c") will be the second element. + var matches: (Seq[Attribute], Seq[String]) = nameParts match { + case dbPart +: tblPart +: name +: nestedFields => + val key = (dbPart.toLowerCase(Locale.ROOT), + tblPart.toLowerCase(Locale.ROOT), name.toLowerCase(Locale.ROOT)) + val attributes = collectMatches(name, qualified3Part.get(key)).filter { + a => (resolver(dbPart, a.qualifier.head) && resolver(tblPart, a.qualifier.last)) + } + (attributes, nestedFields) + case all => + (Seq.empty, Seq.empty) + } + + // If there are no matches, then find matches for the given name assuming that + // the 1st part is a qualifier (i.e. table name, alias, or subquery alias) and the + // 2nd part is the actual name. This returns a tuple of // matched attributes and a list of parts that are to be resolved. // // For example, consider an example where "a" is the table name, "b" is the column name, // and "c" is the struct field name, i.e. "a.b.c". In this case, Attribute will be "a.b", // and the second element will be List("c"). - val matches = nameParts match { - case qualifier +: name +: nestedFields => - val key = (qualifier.toLowerCase(Locale.ROOT), name.toLowerCase(Locale.ROOT)) - val attributes = collectMatches(name, qualified.get(key)).filter { a => - resolver(qualifier, a.qualifier.get) - } - (attributes, nestedFields) - case all => - (Nil, all) + if (matches._1.isEmpty) { + matches = nameParts match { + case qualifier +: name +: nestedFields => + val key = (qualifier.toLowerCase(Locale.ROOT), name.toLowerCase(Locale.ROOT)) + val attributes = collectMatches(name, qualified.get(key)).filter { a => + resolver(qualifier, a.qualifier.last) + } + (attributes, nestedFields) + case all => + (Seq.empty[Attribute], Seq.empty[String]) + } } - // If none of attributes match `table.column` pattern, we try to resolve it as a column. + // If none of the attributes match the database.table.column pattern or the + // `table.column` pattern, we try to resolve it as a column. val (candidates, nestedFields) = matches match { case (Seq(), _) => val name = nameParts.head diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/identifiers.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/identifiers.scala index a3cc4529b545..deceec73dda3 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/identifiers.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/identifiers.scala @@ -47,6 +47,22 @@ sealed trait IdentifierWithDatabase { override def toString: String = quotedString } +/** + * Encapsulates an identifier that is either an alias name or an identifier that has a table + * name and optionally a database name. + * The SubqueryAlias node keeps track of the qualifier using the information in this structure. + * @param identifier - an alias name or a table name + * @param database - an optional database name + */ +case class AliasIdentifier(identifier: String, database: Option[String]) + extends IdentifierWithDatabase { + + def this(identifier: String) = this(identifier, None) +} + +object AliasIdentifier { + def apply(identifier: String): AliasIdentifier = new AliasIdentifier(identifier) +} /** * Identifies a table in a database.
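As a quick illustration of the new identifier type, here is a minimal sketch of how AliasIdentifier is intended to be constructed (the values are illustrative; only AliasIdentifier and the inherited toString/quotedString come from the patch above):

```scala
import org.apache.spark.sql.catalyst.AliasIdentifier

// A bare alias, e.g. a temporary view or an explicit "... AS a" alias: no database part.
val aliasOnly = AliasIdentifier("a")                  // database = None
// A catalog table keeps its database, so downstream qualifiers can become Seq("db1", "t1").
val dbQualified = AliasIdentifier("t1", Some("db1"))

// toString delegates to quotedString from IdentifierWithDatabase, e.g. `db1`.`t1`
println(dbQualified)
```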
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala index d7dbdb39a9af..2bae21d4f229 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala @@ -17,6 +17,7 @@ package org.apache.spark.sql.catalyst.plans.logical +import org.apache.spark.sql.catalyst.{AliasIdentifier} import org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTable} import org.apache.spark.sql.catalyst.expressions._ @@ -113,7 +114,7 @@ case class Generate( def qualifiedGeneratorOutput: Seq[Attribute] = { val qualifiedOutput = qualifier.map { q => // prepend the new qualifier to the existed one - generatorOutput.map(a => a.withQualifier(Some(q))) + generatorOutput.map(a => a.withQualifier(Seq(q))) }.getOrElse(generatorOutput) val nullableOutput = qualifiedOutput.map { // if outer, make all attributes nullable, otherwise keep existing nullability @@ -794,19 +795,37 @@ case class LocalLimit(limitExpr: Expression, child: LogicalPlan) extends OrderPr /** * Aliased subquery. * - * @param alias the alias name for this subquery. + * @param name the alias identifier for this subquery. * @param child the logical plan of this subquery. */ case class SubqueryAlias( - alias: String, + name: AliasIdentifier, child: LogicalPlan) extends OrderPreservingUnaryNode { - override def doCanonicalize(): LogicalPlan = child.canonicalized + def alias: String = name.identifier - override def output: Seq[Attribute] = child.output.map(_.withQualifier(Some(alias))) + override def output: Seq[Attribute] = { + val qualifierList = name.database.map(Seq(_, alias)).getOrElse(Seq(alias)) + child.output.map(_.withQualifier(qualifierList)) + } + override def doCanonicalize(): LogicalPlan = child.canonicalized } +object SubqueryAlias { + def apply( + identifier: String, + child: LogicalPlan): SubqueryAlias = { + SubqueryAlias(AliasIdentifier(identifier), child) + } + + def apply( + identifier: String, + database: String, + child: LogicalPlan): SubqueryAlias = { + SubqueryAlias(AliasIdentifier(identifier, Some(database)), child) + } +} /** * Sample the dataset. 
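The effect of the SubqueryAlias change on attribute qualifiers can be sketched as follows. This is a rough example, not part of the patch; it uses the catalyst expression DSL ('i1.int) only for brevity, and the table/database names are made up:

```scala
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, SubqueryAlias}

val relation = LocalRelation('i1.int)

// Alias only: output attributes are qualified as Seq("t1"), i.e. they resolve as t1.i1.
val aliased = SubqueryAlias("t1", relation)
// Catalog lookup path: alias plus database, so attributes carry Seq("db1", "t1"),
// which is what allows `SELECT db1.t1.i1 FROM db1.t1` to resolve.
val dbAliased = SubqueryAlias("t1", "db1", relation)

assert(aliased.output.head.qualifier == Seq("t1"))
assert(dbAliased.output.head.qualifier == Seq("db1", "t1"))
```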
* diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala index 50496a041052..89fabd477406 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala @@ -18,7 +18,7 @@ package org.apache.spark.sql.catalyst.catalog import org.apache.spark.sql.AnalysisException -import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier} +import org.apache.spark.sql.catalyst.{AliasIdentifier, FunctionIdentifier, TableIdentifier} import org.apache.spark.sql.catalyst.analysis._ import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.parser.CatalystSqlParser @@ -537,11 +537,11 @@ abstract class SessionCatalogSuite extends AnalysisTest { val view = View(desc = metadata, output = metadata.schema.toAttributes, child = CatalystSqlParser.parsePlan(metadata.viewText.get)) comparePlans(catalog.lookupRelation(TableIdentifier("view1", Some("db3"))), - SubqueryAlias("view1", view)) + SubqueryAlias("view1", "db3", view)) // Look up a view using current database of the session catalog. catalog.setCurrentDatabase("db3") comparePlans(catalog.lookupRelation(TableIdentifier("view1")), - SubqueryAlias("view1", view)) + SubqueryAlias("view1", "db3", view)) } } diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionSetSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionSetSuite.scala index 12eddf557109..3ccaa5976cc2 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionSetSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionSetSuite.scala @@ -41,7 +41,7 @@ class ExpressionSetSuite extends SparkFunSuite { // maxHash's hashcode is calculated based on this exprId's hashcode, so we set this // exprId's hashCode to this specific value to make sure maxHash's hashcode is // `Int.MaxValue` - override def hashCode: Int = -1030353449 + override def hashCode: Int = 1394598635 // We are implementing this equals() only because the style-checking rule "you should // implement equals and hashCode together" requires us to override def equals(obj: Any): Boolean = super.equals(obj) @@ -57,7 +57,7 @@ class ExpressionSetSuite extends SparkFunSuite { // minHash's hashcode is calculated based on this exprId's hashcode, so we set this // exprId's hashCode to this specific value to make sure minHash's hashcode is // `Int.MinValue` - override def hashCode: Int = 1407330692 + override def hashCode: Int = -462684520 // We are implementing this equals() only because the style-checking rule "you should // implement equals and hashCode together" requires us to override def equals(obj: Any): Boolean = super.equals(obj) diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/SubexpressionEliminationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/SubexpressionEliminationSuite.scala index c48730bd9d1c..1fa185cc77eb 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/SubexpressionEliminationSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/SubexpressionEliminationSuite.scala @@ -30,7 +30,7 @@ class SubexpressionEliminationSuite extends 
SparkFunSuite { } val b1 = a.withName("name2").withExprId(id) val b2 = a.withExprId(id) - val b3 = a.withQualifier(Some("qualifierName")) + val b3 = a.withQualifier(Seq("qualifierName")) assert(b1 != b2) assert(a != b1) diff --git a/sql/core/src/test/resources/sql-tests/inputs/columnresolution-views.sql b/sql/core/src/test/resources/sql-tests/inputs/columnresolution-views.sql index d3f928751757..83c32a5bf243 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/columnresolution-views.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/columnresolution-views.sql @@ -13,10 +13,8 @@ DROP VIEW view1; -- Test scenario with Global Temp view CREATE OR REPLACE GLOBAL TEMPORARY VIEW view1 as SELECT 1 as i1; SELECT * FROM global_temp.view1; --- TODO: Support this scenario SELECT global_temp.view1.* FROM global_temp.view1; SELECT i1 FROM global_temp.view1; --- TODO: Support this scenario SELECT global_temp.view1.i1 FROM global_temp.view1; SELECT view1.i1 FROM global_temp.view1; SELECT a.i1 FROM global_temp.view1 AS a; diff --git a/sql/core/src/test/resources/sql-tests/inputs/columnresolution.sql b/sql/core/src/test/resources/sql-tests/inputs/columnresolution.sql index 79e90ad3de91..d001185a7393 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/columnresolution.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/columnresolution.sql @@ -14,9 +14,7 @@ SELECT i1 FROM mydb1.t1; SELECT t1.i1 FROM t1; SELECT t1.i1 FROM mydb1.t1; --- TODO: Support this scenario SELECT mydb1.t1.i1 FROM t1; --- TODO: Support this scenario SELECT mydb1.t1.i1 FROM mydb1.t1; USE mydb2; @@ -24,7 +22,6 @@ SELECT i1 FROM t1; SELECT i1 FROM mydb1.t1; SELECT t1.i1 FROM t1; SELECT t1.i1 FROM mydb1.t1; --- TODO: Support this scenario SELECT mydb1.t1.i1 FROM mydb1.t1; -- Scenario: resolve fully qualified table name in star expansion @@ -34,7 +31,6 @@ SELECT mydb1.t1.* FROM mydb1.t1; SELECT t1.* FROM mydb1.t1; USE mydb2; SELECT t1.* FROM t1; --- TODO: Support this scenario SELECT mydb1.t1.* FROM mydb1.t1; SELECT t1.* FROM mydb1.t1; SELECT a.* FROM mydb1.t1 AS a; @@ -47,21 +43,17 @@ CREATE TABLE t4 USING parquet AS SELECT * FROM VALUES (4,1), (2,1) AS t4(c2, c3) SELECT * FROM t3 WHERE c1 IN (SELECT c2 FROM t4 WHERE t4.c3 = t3.c2); --- TODO: Support this scenario SELECT * FROM mydb1.t3 WHERE c1 IN (SELECT mydb1.t4.c2 FROM mydb1.t4 WHERE mydb1.t4.c3 = mydb1.t3.c2); -- Scenario: column resolution scenarios in join queries SET spark.sql.crossJoin.enabled = true; --- TODO: Support this scenario SELECT mydb1.t1.i1 FROM t1, mydb2.t1; --- TODO: Support this scenario SELECT mydb1.t1.i1 FROM mydb1.t1, mydb2.t1; USE mydb2; --- TODO: Support this scenario SELECT mydb1.t1.i1 FROM t1, mydb1.t1; SET spark.sql.crossJoin.enabled = false; @@ -75,12 +67,10 @@ SELECT t5.t5.i1 FROM mydb1.t5; SELECT t5.i1 FROM mydb1.t5; SELECT t5.* FROM mydb1.t5; SELECT t5.t5.* FROM mydb1.t5; --- TODO: Support this scenario SELECT mydb1.t5.t5.i1 FROM mydb1.t5; --- TODO: Support this scenario SELECT mydb1.t5.t5.i2 FROM mydb1.t5; --- TODO: Support this scenario SELECT mydb1.t5.* FROM mydb1.t5; +SELECT mydb1.t5.* FROM t5; -- Cleanup and Reset USE default; diff --git a/sql/core/src/test/resources/sql-tests/results/columnresolution-negative.sql.out b/sql/core/src/test/resources/sql-tests/results/columnresolution-negative.sql.out index 539f673c9d67..9fc97f0c3914 100644 --- a/sql/core/src/test/resources/sql-tests/results/columnresolution-negative.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/columnresolution-negative.sql.out @@ -72,7 +72,7 @@ SELECT i1 
FROM t1, mydb1.t1 struct<> -- !query 8 output org.apache.spark.sql.AnalysisException -Reference 'i1' is ambiguous, could be: t1.i1, t1.i1.; line 1 pos 7 +Reference 'i1' is ambiguous, could be: mydb1.t1.i1, mydb1.t1.i1.; line 1 pos 7 -- !query 9 @@ -81,7 +81,7 @@ SELECT t1.i1 FROM t1, mydb1.t1 struct<> -- !query 9 output org.apache.spark.sql.AnalysisException -Reference 't1.i1' is ambiguous, could be: t1.i1, t1.i1.; line 1 pos 7 +Reference 't1.i1' is ambiguous, could be: mydb1.t1.i1, mydb1.t1.i1.; line 1 pos 7 -- !query 10 @@ -90,7 +90,7 @@ SELECT mydb1.t1.i1 FROM t1, mydb1.t1 struct<> -- !query 10 output org.apache.spark.sql.AnalysisException -cannot resolve '`mydb1.t1.i1`' given input columns: [t1.i1, t1.i1]; line 1 pos 7 +Reference 'mydb1.t1.i1' is ambiguous, could be: mydb1.t1.i1, mydb1.t1.i1.; line 1 pos 7 -- !query 11 @@ -99,7 +99,7 @@ SELECT i1 FROM t1, mydb2.t1 struct<> -- !query 11 output org.apache.spark.sql.AnalysisException -Reference 'i1' is ambiguous, could be: t1.i1, t1.i1.; line 1 pos 7 +Reference 'i1' is ambiguous, could be: mydb1.t1.i1, mydb2.t1.i1.; line 1 pos 7 -- !query 12 @@ -108,7 +108,7 @@ SELECT t1.i1 FROM t1, mydb2.t1 struct<> -- !query 12 output org.apache.spark.sql.AnalysisException -Reference 't1.i1' is ambiguous, could be: t1.i1, t1.i1.; line 1 pos 7 +Reference 't1.i1' is ambiguous, could be: mydb1.t1.i1, mydb2.t1.i1.; line 1 pos 7 -- !query 13 @@ -125,7 +125,7 @@ SELECT i1 FROM t1, mydb1.t1 struct<> -- !query 14 output org.apache.spark.sql.AnalysisException -Reference 'i1' is ambiguous, could be: t1.i1, t1.i1.; line 1 pos 7 +Reference 'i1' is ambiguous, could be: mydb2.t1.i1, mydb1.t1.i1.; line 1 pos 7 -- !query 15 @@ -134,7 +134,7 @@ SELECT t1.i1 FROM t1, mydb1.t1 struct<> -- !query 15 output org.apache.spark.sql.AnalysisException -Reference 't1.i1' is ambiguous, could be: t1.i1, t1.i1.; line 1 pos 7 +Reference 't1.i1' is ambiguous, could be: mydb2.t1.i1, mydb1.t1.i1.; line 1 pos 7 -- !query 16 @@ -143,7 +143,7 @@ SELECT i1 FROM t1, mydb2.t1 struct<> -- !query 16 output org.apache.spark.sql.AnalysisException -Reference 'i1' is ambiguous, could be: t1.i1, t1.i1.; line 1 pos 7 +Reference 'i1' is ambiguous, could be: mydb2.t1.i1, mydb2.t1.i1.; line 1 pos 7 -- !query 17 @@ -152,7 +152,7 @@ SELECT t1.i1 FROM t1, mydb2.t1 struct<> -- !query 17 output org.apache.spark.sql.AnalysisException -Reference 't1.i1' is ambiguous, could be: t1.i1, t1.i1.; line 1 pos 7 +Reference 't1.i1' is ambiguous, could be: mydb2.t1.i1, mydb2.t1.i1.; line 1 pos 7 -- !query 18 @@ -161,7 +161,7 @@ SELECT db1.t1.i1 FROM t1, mydb2.t1 struct<> -- !query 18 output org.apache.spark.sql.AnalysisException -cannot resolve '`db1.t1.i1`' given input columns: [t1.i1, t1.i1]; line 1 pos 7 +cannot resolve '`db1.t1.i1`' given input columns: [mydb2.t1.i1, mydb2.t1.i1]; line 1 pos 7 -- !query 19 @@ -186,7 +186,7 @@ SELECT mydb1.t1 FROM t1 struct<> -- !query 21 output org.apache.spark.sql.AnalysisException -cannot resolve '`mydb1.t1`' given input columns: [t1.i1]; line 1 pos 7 +cannot resolve '`mydb1.t1`' given input columns: [mydb1.t1.i1]; line 1 pos 7 -- !query 22 @@ -204,7 +204,7 @@ SELECT t1 FROM mydb1.t1 struct<> -- !query 23 output org.apache.spark.sql.AnalysisException -cannot resolve '`t1`' given input columns: [t1.i1]; line 1 pos 7 +cannot resolve '`t1`' given input columns: [mydb1.t1.i1]; line 1 pos 7 -- !query 24 @@ -221,7 +221,7 @@ SELECT mydb1.t1.i1 FROM t1 struct<> -- !query 25 output org.apache.spark.sql.AnalysisException -cannot resolve '`mydb1.t1.i1`' given input columns: [t1.i1]; line 1 
pos 7 +cannot resolve '`mydb1.t1.i1`' given input columns: [mydb2.t1.i1]; line 1 pos 7 -- !query 26 diff --git a/sql/core/src/test/resources/sql-tests/results/columnresolution-views.sql.out b/sql/core/src/test/resources/sql-tests/results/columnresolution-views.sql.out index 209211960095..3d8fb661afe5 100644 --- a/sql/core/src/test/resources/sql-tests/results/columnresolution-views.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/columnresolution-views.sql.out @@ -85,10 +85,9 @@ struct -- !query 10 SELECT global_temp.view1.* FROM global_temp.view1 -- !query 10 schema -struct<> +struct -- !query 10 output -org.apache.spark.sql.AnalysisException -cannot resolve 'global_temp.view1.*' given input columns 'i1'; +1 -- !query 11 @@ -102,10 +101,9 @@ struct -- !query 12 SELECT global_temp.view1.i1 FROM global_temp.view1 -- !query 12 schema -struct<> +struct -- !query 12 output -org.apache.spark.sql.AnalysisException -cannot resolve '`global_temp.view1.i1`' given input columns: [view1.i1]; line 1 pos 7 +1 -- !query 13 diff --git a/sql/core/src/test/resources/sql-tests/results/columnresolution.sql.out b/sql/core/src/test/resources/sql-tests/results/columnresolution.sql.out index e10f516ad6e5..73e3fdc08232 100644 --- a/sql/core/src/test/resources/sql-tests/results/columnresolution.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/columnresolution.sql.out @@ -1,5 +1,5 @@ -- Automatically generated by SQLQueryTestSuite --- Number of queries: 54 +-- Number of queries: 55 -- !query 0 @@ -93,19 +93,17 @@ struct -- !query 11 SELECT mydb1.t1.i1 FROM t1 -- !query 11 schema -struct<> +struct -- !query 11 output -org.apache.spark.sql.AnalysisException -cannot resolve '`mydb1.t1.i1`' given input columns: [t1.i1]; line 1 pos 7 +1 -- !query 12 SELECT mydb1.t1.i1 FROM mydb1.t1 -- !query 12 schema -struct<> +struct -- !query 12 output -org.apache.spark.sql.AnalysisException -cannot resolve '`mydb1.t1.i1`' given input columns: [t1.i1]; line 1 pos 7 +1 -- !query 13 @@ -151,10 +149,9 @@ struct -- !query 18 SELECT mydb1.t1.i1 FROM mydb1.t1 -- !query 18 schema -struct<> +struct -- !query 18 output -org.apache.spark.sql.AnalysisException -cannot resolve '`mydb1.t1.i1`' given input columns: [t1.i1]; line 1 pos 7 +1 -- !query 19 @@ -176,10 +173,9 @@ struct -- !query 21 SELECT mydb1.t1.* FROM mydb1.t1 -- !query 21 schema -struct<> +struct -- !query 21 output -org.apache.spark.sql.AnalysisException -cannot resolve 'mydb1.t1.*' given input columns 'i1'; +1 -- !query 22 @@ -209,10 +205,9 @@ struct -- !query 25 SELECT mydb1.t1.* FROM mydb1.t1 -- !query 25 schema -struct<> +struct -- !query 25 output -org.apache.spark.sql.AnalysisException -cannot resolve 'mydb1.t1.*' given input columns 'i1'; +1 -- !query 26 @@ -267,10 +262,9 @@ struct SELECT * FROM mydb1.t3 WHERE c1 IN (SELECT mydb1.t4.c2 FROM mydb1.t4 WHERE mydb1.t4.c3 = mydb1.t3.c2) -- !query 32 schema -struct<> +struct -- !query 32 output -org.apache.spark.sql.AnalysisException -cannot resolve '`mydb1.t4.c3`' given input columns: [t4.c2, t4.c3]; line 2 pos 42 +4 1 -- !query 33 @@ -284,19 +278,17 @@ spark.sql.crossJoin.enabled true -- !query 34 SELECT mydb1.t1.i1 FROM t1, mydb2.t1 -- !query 34 schema -struct<> +struct -- !query 34 output -org.apache.spark.sql.AnalysisException -cannot resolve '`mydb1.t1.i1`' given input columns: [t1.i1, t1.i1]; line 1 pos 7 +1 -- !query 35 SELECT mydb1.t1.i1 FROM mydb1.t1, mydb2.t1 -- !query 35 schema -struct<> +struct -- !query 35 output -org.apache.spark.sql.AnalysisException -cannot resolve '`mydb1.t1.i1`' given input 
columns: [t1.i1, t1.i1]; line 1 pos 7 +1 -- !query 36 @@ -310,10 +302,9 @@ struct<> -- !query 37 SELECT mydb1.t1.i1 FROM t1, mydb1.t1 -- !query 37 schema -struct<> +struct -- !query 37 output -org.apache.spark.sql.AnalysisException -cannot resolve '`mydb1.t1.i1`' given input columns: [t1.i1, t1.i1]; line 1 pos 7 +1 -- !query 38 @@ -399,40 +390,37 @@ struct -- !query 48 SELECT mydb1.t5.t5.i1 FROM mydb1.t5 -- !query 48 schema -struct<> +struct -- !query 48 output -org.apache.spark.sql.AnalysisException -cannot resolve '`mydb1.t5.t5.i1`' given input columns: [t5.i1, t5.t5]; line 1 pos 7 +2 -- !query 49 SELECT mydb1.t5.t5.i2 FROM mydb1.t5 -- !query 49 schema -struct<> +struct -- !query 49 output -org.apache.spark.sql.AnalysisException -cannot resolve '`mydb1.t5.t5.i2`' given input columns: [t5.i1, t5.t5]; line 1 pos 7 +3 -- !query 50 SELECT mydb1.t5.* FROM mydb1.t5 -- !query 50 schema -struct<> +struct> -- !query 50 output -org.apache.spark.sql.AnalysisException -cannot resolve 'mydb1.t5.*' given input columns 'i1, t5'; +1 {"i1":2,"i2":3} -- !query 51 -USE default +SELECT mydb1.t5.* FROM t5 -- !query 51 schema -struct<> +struct> -- !query 51 output - +1 {"i1":2,"i2":3} -- !query 52 -DROP DATABASE mydb1 CASCADE +USE default -- !query 52 schema struct<> -- !query 52 output @@ -440,8 +428,16 @@ struct<> -- !query 53 -DROP DATABASE mydb2 CASCADE +DROP DATABASE mydb1 CASCADE -- !query 53 schema struct<> -- !query 53 output + + +-- !query 54 +DROP DATABASE mydb2 CASCADE +-- !query 54 schema +struct<> +-- !query 54 output + diff --git a/sql/core/src/test/resources/sql-tests/results/string-functions.sql.out b/sql/core/src/test/resources/sql-tests/results/string-functions.sql.out index d5f8705a35ed..7b3dc8438888 100644 --- a/sql/core/src/test/resources/sql-tests/results/string-functions.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/string-functions.sql.out @@ -36,14 +36,14 @@ struct -- !query 3 output == Parsed Logical Plan == 'Project [concat(concat(concat('col1, 'col2), 'col3), 'col4) AS col#x] -+- 'SubqueryAlias __auto_generated_subquery_name ++- 'SubqueryAlias `__auto_generated_subquery_name` +- 'Project ['id AS col1#x, 'id AS col2#x, 'id AS col3#x, 'id AS col4#x] +- 'UnresolvedTableValuedFunction range, [10] == Analyzed Logical Plan == col: string Project [concat(concat(concat(cast(col1#xL as string), cast(col2#xL as string)), cast(col3#xL as string)), cast(col4#xL as string)) AS col#x] -+- SubqueryAlias __auto_generated_subquery_name ++- SubqueryAlias `__auto_generated_subquery_name` +- Project [id#xL AS col1#xL, id#xL AS col2#xL, id#xL AS col3#xL, id#xL AS col4#xL] +- Range (0, 10, step=1, splits=None) diff --git a/sql/core/src/test/resources/sql-tests/results/subquery/negative-cases/invalid-correlation.sql.out b/sql/core/src/test/resources/sql-tests/results/subquery/negative-cases/invalid-correlation.sql.out index 2586f26f71c3..e49978ddb1ce 100644 --- a/sql/core/src/test/resources/sql-tests/results/subquery/negative-cases/invalid-correlation.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/subquery/negative-cases/invalid-correlation.sql.out @@ -109,8 +109,8 @@ struct<> org.apache.spark.sql.AnalysisException Expressions referencing the outer query are not supported outside of WHERE/HAVING clauses: Aggregate [min(outer(t2a#x)) AS min(outer())#x] -+- SubqueryAlias t3 ++- SubqueryAlias `t3` +- Project [t3a#x, t3b#x, t3c#x] - +- SubqueryAlias t3 + +- SubqueryAlias `t3` +- LocalRelation [t3a#x, t3b#x, t3c#x] ; diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala index 2cb7a04714a5..3a393d766b0b 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala @@ -2689,7 +2689,8 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext { val m = intercept[AnalysisException] { sql("SELECT * FROM t, S WHERE c = C") }.message - assert(m.contains("cannot resolve '(t.`c` = S.`C`)' due to data type mismatch")) + assert( + m.contains("cannot resolve '(default.t.`c` = default.S.`C`)' due to data type mismatch")) } } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala index d254345e8fa5..bdc106325aa5 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala @@ -624,7 +624,7 @@ class PlannerSuite extends SharedSQLContext { dataType = LongType, nullable = false ) (exprId = exprId, - qualifier = Some("col1_qualifier") + qualifier = Seq("col1_qualifier") ) val attribute2 = diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/TPCDSQueryBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/TPCDSQueryBenchmark.scala index abe61a2c2b9c..fccee97820e7 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/TPCDSQueryBenchmark.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/TPCDSQueryBenchmark.scala @@ -72,7 +72,7 @@ object TPCDSQueryBenchmark extends Logging { val queryRelations = scala.collection.mutable.HashSet[String]() spark.sql(queryString).queryExecution.analyzed.foreach { case SubqueryAlias(alias, _: LogicalRelation) => - queryRelations.add(alias) + queryRelations.add(alias.identifier) case LogicalRelation(_, _, Some(catalogTable), _) => queryRelations.add(catalogTable.identifier.table) case HiveTableRelation(tableMeta, _, _) => diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala index ba9b944e4a05..688b619cd1bb 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala @@ -18,7 +18,7 @@ package org.apache.spark.sql.hive import org.apache.spark.sql.{QueryTest, Row, SaveMode} -import org.apache.spark.sql.catalyst.TableIdentifier +import org.apache.spark.sql.catalyst.{AliasIdentifier, TableIdentifier} import org.apache.spark.sql.catalyst.catalog.CatalogTableType import org.apache.spark.sql.catalyst.parser.CatalystSqlParser import org.apache.spark.sql.catalyst.plans.logical.SubqueryAlias @@ -62,7 +62,7 @@ class HiveMetastoreCatalogSuite extends TestHiveSingleton with SQLTestUtils { spark.sql("create view vw1 as select 1 as id") val plan = spark.sql("select id from vw1").queryExecution.analyzed val aliases = plan.collect { - case x @ SubqueryAlias("vw1", _) => x + case x @ SubqueryAlias(AliasIdentifier("vw1", Some("default")), _) => x } assert(aliases.size == 1) } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala index 1a916824c5d9..13aa2b843667 100644 --- 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala @@ -1967,6 +1967,22 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton { } } + test("column resolution scenarios with hive table") { + val currentDb = spark.catalog.currentDatabase + withTempDatabase { db1 => + try { + spark.catalog.setCurrentDatabase(db1) + spark.sql("CREATE TABLE t1(i1 int) STORED AS parquet") + spark.sql("INSERT INTO t1 VALUES(1)") + checkAnswer(spark.sql(s"SELECT $db1.t1.i1 FROM t1"), Row(1)) + checkAnswer(spark.sql(s"SELECT $db1.t1.i1 FROM $db1.t1"), Row(1)) + checkAnswer(spark.sql(s"SELECT $db1.t1.* FROM $db1.t1"), Row(1)) + } finally { + spark.catalog.setCurrentDatabase(currentDb) + } + } + } + test("SPARK-17409: Do Not Optimize Query in CTAS (Hive Serde Table) More Than Once") { withTable("bar") { withTempView("foo") {
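Finally, a rough end-to-end illustration of the behavior the updated golden files and the new Hive test expect (this assumes a SparkSession named `spark`; the mydb1/t1 names mirror the ones used in the tests above):

```scala
spark.sql("CREATE DATABASE IF NOT EXISTS mydb1")
spark.sql("CREATE TABLE IF NOT EXISTS mydb1.t1(i1 INT) USING parquet")
spark.sql("INSERT INTO mydb1.t1 VALUES (1)")

// Before this change both statements failed with "cannot resolve ..."; with Seq-based
// qualifiers the column i1 is now qualified as Seq("mydb1", "t1"), so they resolve.
spark.sql("SELECT mydb1.t1.i1 FROM mydb1.t1").show()
spark.sql("SELECT mydb1.t1.* FROM mydb1.t1").show()
```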