2 changes: 1 addition & 1 deletion core/src/main/scala/org/apache/spark/SparkException.scala
@@ -72,7 +72,7 @@ private[spark] class SparkUpgradeException(version: String, message: String, cau
/**
* Arithmetic exception thrown from Spark with an error class.
*/
-class SparkArithmeticException(errorClass: String, messageParameters: Array[String])
+private[spark] class SparkArithmeticException(errorClass: String, messageParameters: Array[String])
extends ArithmeticException(SparkThrowableHelper.getMessage(errorClass, messageParameters))
with SparkThrowable {

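For context on the idiom this PR applies throughout: `private[spark]` is Scala's package-qualified access modifier, so the class remains visible to everything under the `org.apache.spark` package while dropping out of the public API surface. A minimal sketch (package and class names here are hypothetical):

```scala
package org.apache.spark.myfeature

// Usable from any code under org.apache.spark, but not from user applications
// that compile only against Spark's public API.
private[spark] class InternalHelper {
  def describe(): String = "internal only"
}
```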
@@ -30,7 +30,7 @@ import org.apache.spark.storage.{BlockId, BlockManagerId, BlockNotFoundException
/**
* Object for grouping error messages from (most) exceptions thrown during query execution.
*/
-object SparkCoreErrors {
+private[spark] object SparkCoreErrors {
Contributor: shall we mark org.apache.spark.errors as a private package as well?

def rddBlockNotFoundError(blockId: BlockId, id: Int): Throwable = {
new Exception(s"Could not compute split, block $blockId of RDD $id not found")
}
@@ -24,7 +24,7 @@ import java.util.zip.Checksum
* A variant of [[java.util.zip.CheckedOutputStream]] which can
* change the checksum calculator at runtime.
*/
-class MutableCheckedOutputStream(out: OutputStream) extends OutputStream {
+private[spark] class MutableCheckedOutputStream(out: OutputStream) extends OutputStream {
Member: LGTM, thanks!

private var checksum: Checksum = _

def setChecksum(c: Checksum): Unit = {
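A rough usage sketch of the now-internal stream, illustrating the "change the checksum calculator at runtime" contract from the scaladoc. It assumes only the constructor and `setChecksum` shown in this diff plus the inherited `java.io.OutputStream` write methods, and it would have to live inside the `org.apache.spark` package now that the class is `private[spark]`:

```scala
import java.io.ByteArrayOutputStream
import java.util.zip.Adler32

val target = new ByteArrayOutputStream()
val stream = new MutableCheckedOutputStream(target)

val adler = new Adler32
stream.setChecksum(adler)           // the calculator can be (re)assigned at runtime
stream.write(Array[Byte](1, 2, 3))  // bytes are forwarded to `target` and folded into `adler`
stream.flush()
println(adler.getValue)             // checksum of everything written since setChecksum
```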
@@ -16,14 +16,15 @@
*/
package org.apache.spark.scheduler

-import org.apache.spark.annotation.DeveloperApi
+import org.apache.spark.annotation.{DeveloperApi, Since}

/**
* :: DeveloperApi ::
* Stores information about a Miscellaneous Process to pass from the scheduler to SparkListeners.
*/

@DeveloperApi
+@Since("3.2.0")
class MiscellaneousProcessDetails(
val hostPort: String,
val cores: Int,
@@ -228,6 +228,7 @@ case class SparkListenerUnschedulableTaskSetRemoved(
case class SparkListenerBlockUpdated(blockUpdatedInfo: BlockUpdatedInfo) extends SparkListenerEvent

@DeveloperApi
+@Since("3.2.0")
case class SparkListenerMiscellaneousProcessAdded(time: Long, processId: String,
info: MiscellaneousProcessDetails) extends SparkListenerEvent

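A small sketch of how listener-side code might consume the new event. It only touches the fields visible in this diff (`time`, `processId`, `info`, plus `hostPort` and `cores` from `MiscellaneousProcessDetails`); the dispatch path from the listener bus to a concrete callback is not shown here:

```scala
import org.apache.spark.scheduler.{MiscellaneousProcessDetails, SparkListenerMiscellaneousProcessAdded}

// Formats the @Since("3.2.0") event for logging; purely illustrative.
def describeProcessAdded(event: SparkListenerMiscellaneousProcessAdded): String = {
  val details: MiscellaneousProcessDetails = event.info
  s"Process ${event.processId} added at t=${event.time} on ${details.hostPort} " +
    s"with ${details.cores} cores"
}
```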
@@ -29,7 +29,7 @@ import org.apache.spark.deploy.SparkSubmitUtils
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._

-case class IvyProperties(
+private[spark] case class IvyProperties(
Contributor: LGTM

packagesExclusions: String,
packages: String,
repositories: String,
1 change: 1 addition & 0 deletions project/SparkBuild.scala
@@ -967,6 +967,7 @@ object Unidoc {
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/internal")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/hive")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/catalog/v2/utils")))
+.map(_.filterNot(_.getCanonicalPath.contains("org.apache.spark.sql.errors")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/hive")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/v2/avro")))
.map(_.filterNot(_.getCanonicalPath.contains("SSLOptions")))
@@ -17,13 +17,17 @@

package org.apache.spark.sql.connector.catalog;

+import org.apache.spark.annotation.Evolving;
import org.apache.spark.sql.catalyst.analysis.NoSuchFunctionException;
import org.apache.spark.sql.catalyst.analysis.NoSuchNamespaceException;
import org.apache.spark.sql.connector.catalog.functions.UnboundFunction;

/**
* Catalog methods for working with Functions.
+ *
+ * @since 3.2.0
*/
+@Evolving
public interface FunctionCatalog extends CatalogPlugin {

/**
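To make the newly annotated API concrete, here is a minimal in-memory catalog sketch. It assumes the 3.2 interface shape (`initialize`/`name` from `CatalogPlugin`, plus `listFunctions`/`loadFunction` here); the function name `my_udf` and the injected `UnboundFunction` are hypothetical:

```scala
import org.apache.spark.sql.connector.catalog.{FunctionCatalog, Identifier}
import org.apache.spark.sql.connector.catalog.functions.UnboundFunction
import org.apache.spark.sql.util.CaseInsensitiveStringMap

class InMemoryFunctionCatalog(udf: UnboundFunction) extends FunctionCatalog {
  private var catalogName: String = _

  override def initialize(name: String, options: CaseInsensitiveStringMap): Unit = {
    catalogName = name
  }

  override def name(): String = catalogName

  // Advertise a single function under the requested namespace.
  override def listFunctions(namespace: Array[String]): Array[Identifier] =
    Array(Identifier.of(namespace, "my_udf"))

  override def loadFunction(ident: Identifier): UnboundFunction =
    if (ident.name() == "my_udf") udf
    else throw new IllegalArgumentException(s"Unknown function: $ident")
}
```

A plugin registered through a `spark.sql.catalog.<name>` setting would additionally need a zero-argument constructor; the constructor parameter here just keeps the sketch short.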
@@ -21,6 +21,8 @@

/**
* Represents a table which can be atomically truncated.
+ *
+ * @since 3.2.0
*/
@Evolving
public interface TruncatableTable extends Table {
@@ -17,6 +17,7 @@

package org.apache.spark.sql.connector.catalog.functions;

+import org.apache.spark.annotation.Evolving;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.types.DataType;

@@ -42,7 +43,10 @@
*
* @param <S> the JVM type for the aggregation's intermediate state; must be {@link Serializable}
* @param <R> the JVM type of result values
+ *
+ * @since 3.2.0
*/
+@Evolving
Member Author: Also please take a look at the following DSV2 changes, thanks!

public interface AggregateFunction<S extends Serializable, R> extends BoundFunction {

/**
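For illustration, a summing aggregate over a single LONG column, assuming the method set documented for 3.2 (`newAggregationState`/`update`/`merge`/`produceResult`, alongside `inputTypes`/`resultType`/`name` from `BoundFunction`); the intermediate state is a boxed `java.lang.Long`, which satisfies the `Serializable` bound:

```scala
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.connector.catalog.functions.AggregateFunction
import org.apache.spark.sql.types.{DataType, LongType}

class LongSum extends AggregateFunction[java.lang.Long, java.lang.Long] {
  override def name(): String = "long_sum"
  override def inputTypes(): Array[DataType] = Array(LongType)
  override def resultType(): DataType = LongType

  override def newAggregationState(): java.lang.Long = java.lang.Long.valueOf(0L)

  // Fold one input row into the running state, skipping nulls.
  override def update(state: java.lang.Long, input: InternalRow): java.lang.Long =
    if (input.isNullAt(0)) state
    else java.lang.Long.valueOf(state.longValue() + input.getLong(0))

  // Combine partial states produced on different partitions.
  override def merge(left: java.lang.Long, right: java.lang.Long): java.lang.Long =
    java.lang.Long.valueOf(left.longValue() + right.longValue())

  override def produceResult(state: java.lang.Long): java.lang.Long = state
}
```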
@@ -17,6 +17,7 @@

package org.apache.spark.sql.connector.catalog.functions;

+import org.apache.spark.annotation.Evolving;
import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.IntegerType;
import org.apache.spark.sql.types.StructType;
@@ -25,7 +26,10 @@

/**
* Represents a function that is bound to an input type.
+ *
+ * @since 3.2.0
*/
+@Evolving
public interface BoundFunction extends Function {

/**
@@ -17,11 +17,16 @@

package org.apache.spark.sql.connector.catalog.functions;

+import org.apache.spark.annotation.Evolving;

import java.io.Serializable;

/**
* Base class for user-defined functions.
+ *
+ * @since 3.2.0
*/
+@Evolving
public interface Function extends Serializable {

/**
@@ -17,6 +17,7 @@

package org.apache.spark.sql.connector.catalog.functions;

+import org.apache.spark.annotation.Evolving;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.types.DataType;

@@ -133,7 +134,10 @@
*
* @param <R> the JVM type of result values, MUST be consistent with the {@link DataType}
* returned via {@link #resultType()}, according to the mapping above.
+ *
+ * @since 3.2.0
*/
+@Evolving
public interface ScalarFunction<R> extends BoundFunction {
String MAGIC_METHOD_NAME = "invoke";

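A sketch of a two-argument integer addition, assuming the 3.2 contract: Spark first looks for a "magic" method named `invoke` (the `MAGIC_METHOD_NAME` constant above) with unboxed argument types, and falls back to `produceResult(InternalRow)` when it cannot bind one:

```scala
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.connector.catalog.functions.ScalarFunction
import org.apache.spark.sql.types.{DataType, IntegerType}

class IntAdd extends ScalarFunction[Integer] {
  override def name(): String = "int_add"
  override def inputTypes(): Array[DataType] = Array(IntegerType, IntegerType)
  override def resultType(): DataType = IntegerType

  // Magic method resolved by name; avoids boxing the arguments into an InternalRow.
  def invoke(left: Int, right: Int): Int = left + right

  // Fallback row-based path.
  override def produceResult(input: InternalRow): Integer =
    Integer.valueOf(input.getInt(0) + input.getInt(1))
}
```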
@@ -17,11 +17,15 @@

package org.apache.spark.sql.connector.catalog.functions;

+import org.apache.spark.annotation.Evolving;
import org.apache.spark.sql.types.StructType;

/**
* Represents a user-defined function that is not bound to input types.
+ *
+ * @since 3.2.0
*/
+@Evolving
public interface UnboundFunction extends Function {

/**
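A sketch of the binding step this interface describes: validate the caller's input schema, then hand back a `BoundFunction`. It assumes the `bind`/`description` methods the 3.2 interface documents; the wrapped `bound` instance (for example, the `IntAdd` sketch above) is supplied by the caller:

```scala
import org.apache.spark.sql.connector.catalog.functions.{BoundFunction, UnboundFunction}
import org.apache.spark.sql.types.StructType

class UnboundIntAdd(bound: BoundFunction) extends UnboundFunction {
  override def name(): String = "int_add"

  override def description(): String = "int_add(a INT, b INT) - adds two integers"

  override def bind(inputType: StructType): BoundFunction = {
    require(inputType.fields.length == 2, "int_add expects exactly two arguments")
    bound
  }
}
```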
@@ -25,6 +25,8 @@
/**
* A mix-in interface for {@link SparkDataStream} streaming sources to signal that they can report
* metrics.
+ *
+ * @since 3.2.0
*/
@Evolving
public interface ReportsSourceMetrics extends SparkDataStream {
@@ -46,7 +46,7 @@ import org.apache.spark.sql.types._
* As commands are executed eagerly, this also includes errors thrown during the execution of
* commands, which users can see immediately.
*/
-private[spark] object QueryCompilationErrors {
+object QueryCompilationErrors {

def groupingIDMismatchError(groupingID: GroupingID, groupByExprs: Seq[Expression]): Throwable = {
new AnalysisException(
@@ -19,7 +19,7 @@ package org.apache.spark.sql.types

import scala.collection.mutable

-import org.apache.spark.annotation.DeveloperApi
+import org.apache.spark.annotation.{DeveloperApi, Since}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.errors.QueryExecutionErrors
import org.apache.spark.util.Utils
@@ -31,6 +31,7 @@ import org.apache.spark.util.Utils
* alternative approach to register UDTs for user classes.
*/
@DeveloperApi
+@Since("3.2.0")
object UDTRegistration extends Serializable with Logging {

/** The mapping between user classes and the corresponding UserDefinedType classes. */
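As a usage sketch of the registration path mentioned in the scaladoc (an alternative to annotating the user class itself), assuming the `register`/`exists`/`getUDTFor` entry points; both class names below are hypothetical and must be loadable at runtime:

```scala
import org.apache.spark.sql.types.UDTRegistration

// Map a user class to its UDT implementation by fully-qualified class name.
UDTRegistration.register("com.example.Point", "com.example.PointUDT")

if (UDTRegistration.exists("com.example.Point")) {
  val udtClass = UDTRegistration.getUDTFor("com.example.Point") // Option[Class[_]]
  println(udtClass)
}
```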
@@ -22,7 +22,7 @@ import java.util.Objects
import org.json4s.JsonAST.JValue
import org.json4s.JsonDSL._

-import org.apache.spark.annotation.DeveloperApi
+import org.apache.spark.annotation.{DeveloperApi, Since}

/**
* The data type for User Defined Types (UDTs).
@@ -38,6 +38,7 @@ import org.apache.spark.annotation.DeveloperApi
* The conversion via `deserialize` occurs when reading from a `DataFrame`.
*/
@DeveloperApi
+@Since("3.2.0")
abstract class UserDefinedType[UserType >: Null] extends DataType with Serializable {

/** Underlying storage type for this UDT */
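A hypothetical user class and its UDT, sketching the `sqlType`/`serialize`/`deserialize` contract described above: on the Catalyst side each `Point` is stored as an array of two doubles:

```scala
import org.apache.spark.sql.catalyst.util.{ArrayData, GenericArrayData}
import org.apache.spark.sql.types._

class Point(val x: Double, val y: Double) extends Serializable

class PointUDT extends UserDefinedType[Point] {
  // Underlying Catalyst storage type for the UDT.
  override def sqlType: DataType = ArrayType(DoubleType, containsNull = false)

  // User object -> Catalyst value, used when building a DataFrame.
  override def serialize(p: Point): Any = new GenericArrayData(Array[Any](p.x, p.y))

  // Catalyst value -> user object, used when reading from a DataFrame.
  override def deserialize(datum: Any): Point = datum match {
    case a: ArrayData => new Point(a.getDouble(0), a.getDouble(1))
  }

  override def userClass: Class[Point] = classOf[Point]
}
```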
@@ -26,6 +26,8 @@
* <p>
* Tables that have {@link TableCapability#V1_BATCH_WRITE} in the list of their capabilities
* must build {@link V1Write}.
+ *
+ * @since 3.2.0
*/
@Unstable
public interface V1Write extends Write {