@@ -382,7 +382,7 @@ object CatalystTypeConverters {
    * Typical use case would be converting a collection of rows that have the same schema. You will
    * call this function once to get a converter, and apply it to every row.
    */
-  private[sql] def createToCatalystConverter(dataType: DataType): Any => Any = {
+  def createToCatalystConverter(dataType: DataType): Any => Any = {
     if (isPrimitive(dataType)) {
       // Although the `else` branch here is capable of handling inbound conversion of primitives,
       // we add some special-case handling for those types here. The motivation for this relates to
@@ -409,7 +409,7 @@ object CatalystTypeConverters {
    * Typical use case would be converting a collection of rows that have the same schema. You will
    * call this function once to get a converter, and apply it to every row.
    */
-  private[sql] def createToScalaConverter(dataType: DataType): Any => Any = {
+  def createToScalaConverter(dataType: DataType): Any => Any = {
     if (isPrimitive(dataType)) {
       identity
     } else {
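With both converters public, external callers can follow the pattern the doc comment describes: build the converter once per schema, then reuse it for every row. A minimal sketch, assuming the usual Catalyst imports (the schema and rows here are hypothetical):

import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.CatalystTypeConverters
import org.apache.spark.sql.types._

val schema = StructType(Seq(
  StructField("name", StringType),
  StructField("age", IntegerType)))

// Build the converter once for the schema...
val toCatalyst = CatalystTypeConverters.createToCatalystConverter(schema)

// ...then apply it to every row, as the doc comment recommends.
val internalRows = Seq(Row("alice", 30), Row("bob", 25)).map(toCatalyst)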
@@ -708,7 +708,7 @@ object ScalaReflection extends ScalaReflection {
   /**
    * Whether the fields of the given type are defined entirely by its constructor parameters.
    */
-  private[sql] def definedByConstructorParams(tpe: Type): Boolean = {
+  def definedByConstructorParams(tpe: Type): Boolean = {
     tpe <:< localTypeOf[Product] || tpe <:< localTypeOf[DefinedByConstructorParams]
   }

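The check is just the two subtype tests visible in the body: anything that is a Product (every case class) or that opts in via DefinedByConstructorParams qualifies. A small sketch of what that means in practice (Point is a hypothetical case class):

import scala.reflect.runtime.universe._
import org.apache.spark.sql.catalyst.ScalaReflection

case class Point(x: Int, y: Int)

ScalaReflection.definedByConstructorParams(typeOf[Point])  // true:  case classes extend Product
ScalaReflection.definedByConstructorParams(typeOf[String]) // false: neither Product nor opted in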
@@ -246,7 +246,7 @@ class Analyzer(
     }.isDefined
   }

-  private[sql] def hasGroupingFunction(e: Expression): Boolean = {
+  private[analysis] def hasGroupingFunction(e: Expression): Boolean = {
     e.collectFirst {
       case g: Grouping => g
       case g: GroupingID => g
@@ -1412,7 +1412,7 @@ class Analyzer(
    * Construct the output attributes for a [[Generator]], given a list of names. If the list of
    * names is empty, names are assigned from the generator's field names.
    */
-  private[sql] def makeGeneratorOutput(
+  private[analysis] def makeGeneratorOutput(
       generator: Generator,
       names: Seq[String]): Seq[Attribute] = {
     val elementAttrs = generator.elementSchema.toAttributes
@@ -63,7 +63,7 @@ object TypeCoercion {

   // See https://cwiki.apache.org/confluence/display/Hive/LanguageManual+Types.
   // The conversions for integral and floating point types have a linear widening hierarchy:
-  private[sql] val numericPrecedence =
+  val numericPrecedence =
     IndexedSeq(
       ByteType,
       ShortType,
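Because the hierarchy is linear, the widened type of two numeric types is simply whichever appears later in numericPrecedence. A sketch of that rule, now expressible outside the package (widerNumeric is a hypothetical helper, not part of this change):

import org.apache.spark.sql.catalyst.analysis.TypeCoercion
import org.apache.spark.sql.types._

def widerNumeric(t1: DataType, t2: DataType): Option[DataType] = {
  val prec = TypeCoercion.numericPrecedence
  if (prec.contains(t1) && prec.contains(t2)) {
    // The later of the two positions in the sequence is the wider type.
    Some(prec(math.max(prec.indexOf(t1), prec.indexOf(t2))))
  } else {
    None
  }
}

widerNumeric(ByteType, FloatType)  // Some(FloatType)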
@@ -750,7 +750,7 @@ class SessionCatalog(
    *
    * This performs reflection to decide what type of [[Expression]] to return in the builder.
    */
-  private[sql] def makeFunctionBuilder(name: String, functionClassName: String): FunctionBuilder = {
+  def makeFunctionBuilder(name: String, functionClassName: String): FunctionBuilder = {
     // TODO: at least support UDAFs here
     throw new UnsupportedOperationException("Use sqlContext.udf.register(...) instead.")
   }
@@ -794,7 +794,7 @@ class SessionCatalog(
   /**
    * Look up the [[ExpressionInfo]] associated with the specified function, assuming it exists.
    */
-  private[spark] def lookupFunctionInfo(name: FunctionIdentifier): ExpressionInfo = synchronized {
+  def lookupFunctionInfo(name: FunctionIdentifier): ExpressionInfo = synchronized {
     // TODO: just make function registry take in FunctionIdentifier instead of duplicating this
     val database = name.database.orElse(Some(currentDb)).map(formatDatabaseName)
     val qualifiedName = name.copy(database = database)
@@ -906,7 +906,7 @@ class SessionCatalog(
    *
    * This is mainly used for tests.
    */
-  private[sql] def reset(): Unit = synchronized {
+  def reset(): Unit = synchronized {
     setCurrentDatabase(DEFAULT_DATABASE)
     listDatabases().filter(_ != DEFAULT_DATABASE).foreach { db =>
       dropDatabase(db, ignoreIfNotExists = false, cascade = true)
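The first lines of lookupFunctionInfo show the resolution rule worth noting for newly external callers: an unqualified function name is resolved against the current database before the registry lookup, while a qualified one keeps its own database. A sketch of the two identifier shapes involved (the names are illustrative):

import org.apache.spark.sql.catalyst.FunctionIdentifier

val unqualified = FunctionIdentifier("myUdf")                  // database = None, resolves in currentDb
val qualified   = FunctionIdentifier("myUdf", Some("finance")) // database kept as given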
@@ -26,7 +26,7 @@ package object encoders {
    * references from a specific schema.) This requirement allows us to preserve whether a given
    * object type is being bound by name or by ordinal when doing resolution.
    */
-  private[sql] def encoderFor[A : Encoder]: ExpressionEncoder[A] = implicitly[Encoder[A]] match {
+  def encoderFor[A : Encoder]: ExpressionEncoder[A] = implicitly[Encoder[A]] match {
     case e: ExpressionEncoder[A] =>
       e.assertUnresolved()
       e
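encoderFor narrows the implicit Encoder in scope to the concrete ExpressionEncoder that Catalyst works with, asserting it is still unresolved. A minimal sketch of a call site (Person is a hypothetical case class):

import org.apache.spark.sql.Encoders
import org.apache.spark.sql.catalyst.encoders._

case class Person(name: String, age: Int)

implicit val personEncoder = Encoders.product[Person]
val expressionEncoder = encoderFor[Person]  // the underlying ExpressionEncoder[Person]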
@@ -526,7 +526,7 @@ abstract class BinaryOperator extends BinaryExpression with ExpectsInputTypes {
 }


-private[sql] object BinaryOperator {
+object BinaryOperator {
   def unapply(e: BinaryOperator): Option[(Expression, Expression)] = Some((e.left, e.right))
 }

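This companion object is an extractor: with it public, a rule can match any binary operator and reach both operands without enumerating concrete subclasses. A sketch over a hypothetical column a:

import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.types.IntegerType

val expr: Expression = Add(AttributeReference("a", IntegerType)(), Literal(1))

val operands = expr match {
  case BinaryOperator(l, r) => Some((l, r))  // matches Add, And, EqualTo, ...
  case _ => None
}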
@@ -40,7 +40,7 @@ import org.apache.spark.sql.types.{DataType, LongType}
     represent the record number within each partition. The assumption is that the data frame has
     less than 1 billion partitions, and each partition has less than 8 billion records.""",
   extended = "> SELECT _FUNC_();\n 0")
-private[sql] case class MonotonicallyIncreasingID() extends LeafExpression with Nondeterministic {
+case class MonotonicallyIncreasingID() extends LeafExpression with Nondeterministic {

   /**
    * Record ID within each partition. By being transient, count's value is reset to 0 every time
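The limits quoted in the usage string fall out of the ID layout: the partition ID occupies the upper 31 bits and the per-partition record number the lower 33 bits (2^33 is roughly 8 billion). A sketch of how an ID is assembled, assuming that layout:

// Hypothetical helper mirroring the bit layout described above.
def monotonicId(partitionId: Int, recordNumber: Long): Long =
  (partitionId.toLong << 33) + recordNumber

monotonicId(2, 5)  // 17179869189, i.e. (2 << 33) + 5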
@@ -28,7 +28,7 @@ import org.apache.spark.sql.types.{DataType, IntegerType}
 @ExpressionDescription(
   usage = "_FUNC_() - Returns the current partition id of the Spark task",
   extended = "> SELECT _FUNC_();\n 0")
-private[sql] case class SparkPartitionID() extends LeafExpression with Nondeterministic {
+case class SparkPartitionID() extends LeafExpression with Nondeterministic {

   override def nullable: Boolean = false

@@ -24,44 +24,44 @@ import org.apache.spark.sql.catalyst.expressions.codegen.CodegenFallback
 import org.apache.spark.sql.types._

 /** The mode of an [[AggregateFunction]]. */
-private[sql] sealed trait AggregateMode
+sealed trait AggregateMode

 /**
  * An [[AggregateFunction]] with [[Partial]] mode is used for partial aggregation.
  * This function updates the given aggregation buffer with the original input of this
  * function. When it has processed all input rows, the aggregation buffer is returned.
  */
-private[sql] case object Partial extends AggregateMode
+case object Partial extends AggregateMode

 /**
  * An [[AggregateFunction]] with [[PartialMerge]] mode is used to merge aggregation buffers
  * containing intermediate results for this function.
  * This function updates the given aggregation buffer by merging multiple aggregation buffers.
  * When it has processed all input rows, the aggregation buffer is returned.
  */
-private[sql] case object PartialMerge extends AggregateMode
+case object PartialMerge extends AggregateMode

 /**
  * An [[AggregateFunction]] with [[Final]] mode is used to merge aggregation buffers
  * containing intermediate results for this function and then generate the final result.
  * This function updates the given aggregation buffer by merging multiple aggregation buffers.
  * When it has processed all input rows, the final result of this function is returned.
  */
-private[sql] case object Final extends AggregateMode
+case object Final extends AggregateMode

 /**
  * An [[AggregateFunction]] with [[Complete]] mode is used to evaluate this function directly
  * from original input rows without any partial aggregation.
  * This function updates the given aggregation buffer with the original input of this
  * function. When it has processed all input rows, the final result of this function is returned.
  */
-private[sql] case object Complete extends AggregateMode
+case object Complete extends AggregateMode

 /**
  * A placeholder expression used in code-gen; it does not change the corresponding value
  * in the row.
  */
-private[sql] case object NoOp extends Expression with Unevaluable {
+case object NoOp extends Expression with Unevaluable {
   override def nullable: Boolean = true
   override def dataType: DataType = NullType
   override def children: Seq[Expression] = Nil
@@ -84,7 +84,7 @@ object AggregateExpression {
  * A container for an [[AggregateFunction]] with its [[AggregateMode]] and a field
  * (`isDistinct`) indicating if the DISTINCT keyword is specified for this function.
  */
-private[sql] case class AggregateExpression(
+case class AggregateExpression(
     aggregateFunction: AggregateFunction,
     mode: AggregateMode,
     isDistinct: Boolean,
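Taken together, the four modes describe where a function sits in a multi-stage physical plan: Partial produces buffers on the map side, PartialMerge and Final combine buffers, and Complete skips partial aggregation entirely. A sketch of the two expressions a planner would build for a simple sum(x), assuming the companion object's three-argument apply supplies the remaining result id (x is a hypothetical column):

import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.types.LongType

val x = AttributeReference("x", LongType)()
val sum = Sum(x)

val mapSide    = AggregateExpression(sum, Partial, isDistinct = false) // rows -> partial sums
val reduceSide = AggregateExpression(sum, Final, isDistinct = false)   // partial sums -> result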
@@ -125,7 +125,7 @@ abstract class BinaryArithmetic extends BinaryOperator {
   }
 }

-private[sql] object BinaryArithmetic {
+object BinaryArithmetic {
   def unapply(e: BinaryArithmetic): Option[(Expression, Expression)] = Some((e.left, e.right))
 }

@@ -84,8 +84,8 @@ case class CreateArray(children: Seq[Expression]) extends Expression {
 @ExpressionDescription(
   usage = "_FUNC_(key0, value0, key1, value1...) - Creates a map with the given key/value pairs.")
 case class CreateMap(children: Seq[Expression]) extends Expression {
-  private[sql] lazy val keys = children.indices.filter(_ % 2 == 0).map(children)
-  private[sql] lazy val values = children.indices.filter(_ % 2 != 0).map(children)
+  lazy val keys = children.indices.filter(_ % 2 == 0).map(children)
+  lazy val values = children.indices.filter(_ % 2 != 0).map(children)

   override def foldable: Boolean = children.forall(_.foldable)

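The children alternate key, value, key, value, so the even indices are the keys and the odd indices the values, which is exactly what the two filters compute. A sketch with literal children:

import org.apache.spark.sql.catalyst.expressions._

val m = CreateMap(Seq(Literal("a"), Literal(1), Literal("b"), Literal(2)))

m.keys    // Seq(Literal("a"), Literal("b"))
m.values  // Seq(Literal(1), Literal(2))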
@@ -106,7 +106,7 @@ trait ExtractValue extends Expression
 case class GetStructField(child: Expression, ordinal: Int, name: Option[String] = None)
   extends UnaryExpression with ExtractValue {

-  private[sql] lazy val childSchema = child.dataType.asInstanceOf[StructType]
+  lazy val childSchema = child.dataType.asInstanceOf[StructType]

   override def dataType: DataType = childSchema(ordinal).dataType
   override def nullable: Boolean = child.nullable || childSchema(ordinal).nullable
@@ -554,7 +554,7 @@ object XxHash64Function extends InterpretedHashFunction {
 @ExpressionDescription(
   usage = "_FUNC_() - Returns the current database.",
   extended = "> SELECT _FUNC_()")
-private[sql] case class CurrentDatabase() extends LeafExpression with Unevaluable {
+case class CurrentDatabase() extends LeafExpression with Unevaluable {
   override def dataType: DataType = StringType
   override def foldable: Boolean = true
   override def nullable: Boolean = false
@@ -394,13 +394,13 @@ abstract class BinaryComparison extends BinaryOperator with Predicate {
 }


-private[sql] object BinaryComparison {
+object BinaryComparison {
   def unapply(e: BinaryComparison): Option[(Expression, Expression)] = Some((e.left, e.right))
 }


 /** An extractor that matches both standard 3VL equality and null-safe equality. */
-private[sql] object Equality {
+object Equality {
   def unapply(e: BinaryComparison): Option[(Expression, Expression)] = e match {
     case EqualTo(l, r) => Some((l, r))
     case EqualNullSafe(l, r) => Some((l, r))
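Equality matches both standard three-valued-logic equality (=) and null-safe equality (<=>) in a single pattern, so a rule does not need separate cases for EqualTo and EqualNullSafe. A sketch over a hypothetical column a:

import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.types.IntegerType

val a = AttributeReference("a", IntegerType)()

Seq(EqualTo(a, Literal(1)), EqualNullSafe(a, Literal(1))).collect {
  case Equality(l, r) => (l, r)  // both comparisons match here
}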
@@ -218,7 +218,7 @@ class GenericRowWithSchema(values: Array[Any], override val schema: StructType)
  * Note that, while the array is not copied and thus could technically be mutated after creation,
  * this is not allowed.
  */
-class GenericInternalRow(private[sql] val values: Array[Any]) extends BaseGenericInternalRow {
+class GenericInternalRow(val values: Array[Any]) extends BaseGenericInternalRow {
   /** No-arg constructor for serialization. */
   protected def this() = this(null)

@@ -127,7 +127,7 @@ abstract class SetOperation(left: LogicalPlan, right: LogicalPlan) extends BinaryNode {
   }
 }

-private[sql] object SetOperation {
+object SetOperation {
   def unapply(p: SetOperation): Option[(LogicalPlan, LogicalPlan)] = Some((p.left, p.right))
 }

@@ -365,7 +365,7 @@ case class InsertIntoTable(
   override def children: Seq[LogicalPlan] = child :: Nil
   override def output: Seq[Attribute] = Seq.empty

-  private[spark] lazy val expectedColumns = {
+  lazy val expectedColumns = {
     if (table.output.isEmpty) {
       None
     } else {
@@ -509,7 +509,7 @@ case class Window(
   def windowOutputSet: AttributeSet = AttributeSet(windowExpressions.map(_.toAttribute))
 }

-private[sql] object Expand {
+object Expand {
   /**
    * Extract attribute set according to the grouping id.
    *
@@ -23,7 +23,7 @@ package org.apache.spark.sql.catalyst.util
  * `Row` in order to work around a spurious IntelliJ compiler error. This cannot be an abstract
  * class because that leads to compilation errors under Scala 2.11.
  */
-private[spark] class AbstractScalaRowIterator[T] extends Iterator[T] {
+class AbstractScalaRowIterator[T] extends Iterator[T] {
   override def hasNext: Boolean = throw new NotImplementedError

   override def next(): T = throw new NotImplementedError