From 7853316d376433c56333a34abf3f277910a98269 Mon Sep 17 00:00:00 2001
From: Josh Soref
Date: Tue, 10 Nov 2020 21:17:20 -0500
Subject: [PATCH 01/13] spelling: offset

Signed-off-by: Josh Soref
---
 .../scala/org/apache/spark/graphx/lib/PageRankSuite.scala | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/graphx/src/test/scala/org/apache/spark/graphx/lib/PageRankSuite.scala b/graphx/src/test/scala/org/apache/spark/graphx/lib/PageRankSuite.scala
index a5e2fc5c9a74f..8008a89c6cd5f 100644
--- a/graphx/src/test/scala/org/apache/spark/graphx/lib/PageRankSuite.scala
+++ b/graphx/src/test/scala/org/apache/spark/graphx/lib/PageRankSuite.scala
@@ -274,8 +274,8 @@ class PageRankSuite extends SparkFunSuite with LocalSparkContext {
 withSpark { sc =>
 // Check that implementation can handle large vertexIds, SPARK-25149
 val vertexIdOffset = Int.MaxValue.toLong + 1
- val sourceOffest = 4
- val source = vertexIdOffset + sourceOffest
+ val sourceOffset = 4
+ val source = vertexIdOffset + sourceOffset
 val numIter = 10
 val vertices = vertexIdOffset until vertexIdOffset + numIter
 val chain1 = vertices.zip(vertices.tail)
@@ -285,7 +285,7 @@
 val tol = 0.0001
 val errorTol = 1.0e-1
- val a = resetProb / (1 - Math.pow(1 - resetProb, numIter - sourceOffest))
+ val a = resetProb / (1 - Math.pow(1 - resetProb, numIter - sourceOffset))
 // We expect the rank to decay as (1 - resetProb) ^ distance
 val expectedRanks = sc.parallelize(vertices).map { vid =>
 val rank = if (vid < source) {

From 595a25285964b0579953d5fd14dd705c24d8d766 Mon Sep 17 00:00:00 2001
From: Josh Soref
Date: Tue, 10 Nov 2020 21:17:16 -0500
Subject: [PATCH 02/13] spelling: depends

Signed-off-by: Josh Soref
---
 .../apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala
index 853d201ba7ea5..9d372b2eaedea 100644
--- a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala
+++ b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala
@@ -569,7 +569,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
 val rows = spark.table("kafkaWatermark").collect()
 assert(rows.length === 1, s"Unexpected results: ${rows.toList}")
 val row = rows(0)
- // We cannot check the exact window start time as it depands on the time that messages were
+ // We cannot check the exact window start time as it depends on the time that messages were
 // inserted by the producer. So here we just use a low bound to make sure the internal
 // conversion works.
 assert(

From cbb4c8640f77b3a514999c16cae7529a85110ad7 Mon Sep 17 00:00:00 2001
From: Josh Soref
Date: Tue, 10 Nov 2020 21:17:17 -0500
Subject: [PATCH 03/13] spelling: e.g.

Signed-off-by: Josh Soref
---
 .../org/apache/spark/examples/streaming/JavaCustomReceiver.java | 2 +-
 .../apache/spark/examples/streaming/JavaNetworkWordCount.java | 2 +-
 .../examples/streaming/JavaRecoverableNetworkWordCount.java | 2 +-
 .../spark/examples/streaming/JavaSqlNetworkWordCount.java | 2 +-
 .../src/main/python/streaming/recoverable_network_wordcount.py | 2 +-
 examples/src/main/python/streaming/sql_network_wordcount.py | 2 +-
 .../org/apache/spark/examples/streaming/CustomReceiver.scala | 2 +-
 .../org/apache/spark/examples/streaming/NetworkWordCount.scala | 2 +-
 .../spark/examples/streaming/RecoverableNetworkWordCount.scala | 2 +-
 .../apache/spark/examples/streaming/SqlNetworkWordCount.scala | 2 +-
 .../spark/examples/streaming/StatefulNetworkWordCount.scala | 2 +-
 11 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java
index 47692ec982890..f84a1978de1ad 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java
@@ -67,7 +67,7 @@ public static void main(String[] args) throws Exception {
 JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, new Duration(1000));
 // Create an input stream with the custom receiver on target ip:port and count the
- // words in input stream of \n delimited text (eg. generated by 'nc')
+ // words in input stream of \n delimited text (e.g. generated by 'nc')
 JavaReceiverInputDStream lines = ssc.receiverStream(
 new JavaCustomReceiver(args[0], Integer.parseInt(args[1])));
 JavaDStream words = lines.flatMap(x -> Arrays.asList(SPACE.split(x)).iterator());
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
index b217672def88e..d56134bd99e36 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
@@ -57,7 +57,7 @@ public static void main(String[] args) throws Exception {
 JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, Durations.seconds(1));
 // Create a JavaReceiverInputDStream on target ip:port and count the
- // words in input stream of \n delimited text (eg. generated by 'nc')
+ // words in input stream of \n delimited text (e.g. generated by 'nc')
 // Note that no duplication in storage level only for running locally.
 // Replication necessary in distributed scenario for fault tolerance.
 JavaReceiverInputDStream lines = ssc.socketTextStream(
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
index c01a62b078f7a..0c11c40cfe7ed 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
@@ -126,7 +126,7 @@ private static JavaStreamingContext createContext(String ip,
 ssc.checkpoint(checkpointDirectory);
 // Create a socket stream on target ip:port and count the
- // words in input stream of \n delimited text (eg. generated by 'nc')
+ // words in input stream of \n delimited text (e.g. generated by 'nc')
 JavaReceiverInputDStream lines = ssc.socketTextStream(ip, port);
 JavaDStream words = lines.flatMap(x -> Arrays.asList(SPACE.split(x)).iterator());
 JavaPairDStream wordCounts = words.mapToPair(s -> new Tuple2<>(s, 1))
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
index 948d1a2111780..5d30698c93372 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
@@ -59,7 +59,7 @@ public static void main(String[] args) throws Exception {
 JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, Durations.seconds(1));
 // Create a JavaReceiverInputDStream on target ip:port and count the
- // words in input stream of \n delimited text (eg. generated by 'nc')
+ // words in input stream of \n delimited text (e.g. generated by 'nc')
 // Note that no duplication in storage level only for running locally.
 // Replication necessary in distributed scenario for fault tolerance.
 JavaReceiverInputDStream lines = ssc.socketTextStream(
diff --git a/examples/src/main/python/streaming/recoverable_network_wordcount.py b/examples/src/main/python/streaming/recoverable_network_wordcount.py
index 6ebe91a2f47fe..567f9c819e3ad 100644
--- a/examples/src/main/python/streaming/recoverable_network_wordcount.py
+++ b/examples/src/main/python/streaming/recoverable_network_wordcount.py
@@ -66,7 +66,7 @@ def createContext(host, port, outputPath):
 ssc = StreamingContext(sc, 1)
 # Create a socket stream on target ip:port and count the
- # words in input stream of \n delimited text (eg. generated by 'nc')
+ # words in input stream of \n delimited text (e.g. generated by 'nc')
 lines = ssc.socketTextStream(host, port)
 words = lines.flatMap(lambda line: line.split(" "))
 wordCounts = words.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x + y)
diff --git a/examples/src/main/python/streaming/sql_network_wordcount.py b/examples/src/main/python/streaming/sql_network_wordcount.py
index 59a8a11a45b19..2965ea8fb1872 100644
--- a/examples/src/main/python/streaming/sql_network_wordcount.py
+++ b/examples/src/main/python/streaming/sql_network_wordcount.py
@@ -52,7 +52,7 @@ def getSparkSessionInstance(sparkConf):
 ssc = StreamingContext(sc, 1)
 # Create a socket stream on target ip:port and count the
- # words in input stream of \n delimited text (eg. generated by 'nc')
+ # words in input stream of \n delimited text (e.g. generated by 'nc')
 lines = ssc.socketTextStream(host, int(port))
 words = lines.flatMap(lambda line: line.split(" "))
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala
index 0f47deaf1021b..626f4b4d3ccdf 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala
@@ -50,7 +50,7 @@ object CustomReceiver {
 val ssc = new StreamingContext(sparkConf, Seconds(1))
 // Create an input stream with the custom receiver on target ip:port and count the
- // words in input stream of \n delimited text (eg. generated by 'nc')
+ // words in input stream of \n delimited text (e.g. generated by 'nc')
 val lines = ssc.receiverStream(new CustomReceiver(args(0), args(1).toInt))
 val words = lines.flatMap(_.split(" "))
 val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _)
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala
index 26bb51dde3a1d..7d981dfb949ea 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala
@@ -47,7 +47,7 @@ object NetworkWordCount {
 val ssc = new StreamingContext(sparkConf, Seconds(1))
 // Create a socket stream on target ip:port and count the
- // words in input stream of \n delimited text (eg. generated by 'nc')
+ // words in input stream of \n delimited text (e.g. generated by 'nc')
 // Note that no duplication in storage level only for running locally.
 // Replication necessary in distributed scenario for fault tolerance.
 val lines = ssc.socketTextStream(args(0), args(1).toInt, StorageLevel.MEMORY_AND_DISK_SER)
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala
index ee3bbe40fbeed..98539d6494231 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala
@@ -112,7 +112,7 @@ object RecoverableNetworkWordCount {
 ssc.checkpoint(checkpointDirectory)
 // Create a socket stream on target ip:port and count the
- // words in input stream of \n delimited text (eg. generated by 'nc')
+ // words in input stream of \n delimited text (e.g. generated by 'nc')
 val lines = ssc.socketTextStream(ip, port)
 val words = lines.flatMap(_.split(" "))
 val wordCounts = words.map((_, 1)).reduceByKey(_ + _)
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
index 778be7baaeeac..7daa0014e0f1c 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
@@ -51,7 +51,7 @@ object SqlNetworkWordCount {
 val ssc = new StreamingContext(sparkConf, Seconds(2))
 // Create a socket stream on target ip:port and count the
- // words in input stream of \n delimited text (eg. generated by 'nc')
+ // words in input stream of \n delimited text (e.g. generated by 'nc')
 // Note that no duplication in storage level only for running locally.
 // Replication necessary in distributed scenario for fault tolerance.
 val lines = ssc.socketTextStream(args(0), args(1).toInt, StorageLevel.MEMORY_AND_DISK_SER)
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala
index 46f01edf7deec..8a5fcda9cd990 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala
@@ -52,7 +52,7 @@ object StatefulNetworkWordCount {
 val initialRDD = ssc.sparkContext.parallelize(List(("hello", 1), ("world", 1)))
 // Create a ReceiverInputDStream on target ip:port and count the
- // words in input stream of \n delimited test (eg. generated by 'nc')
+ // words in input stream of \n delimited test (e.g. generated by 'nc')
 val lines = ssc.socketTextStream(args(0), args(1).toInt)
 val words = lines.flatMap(_.split(" "))
 val wordDstream = words.map(x => (x, 1))

From 2730502e926c7b7a7f609b5bd225da9ab038f865 Mon Sep 17 00:00:00 2001
From: Josh Soref
Date: Tue, 10 Nov 2020 21:17:17 -0500
Subject: [PATCH 04/13] spelling: expected

Signed-off-by: Josh Soref
---
 .../test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala b/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala
index 2e726b9e650b6..e36555e514c9f 100644
--- a/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala
+++ b/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala
@@ -35,7 +35,7 @@ private[v2] trait V2JDBCTest extends SharedSparkSession {
 def testUpdateColumnNullability(tbl: String): Unit = {
 sql(s"CREATE TABLE $catalogName.alt_table (ID STRING NOT NULL) USING _")
 var t = spark.table(s"$catalogName.alt_table")
- // nullable is true in the expecteSchema because Spark always sets nullable to true
+ // nullable is true in the expectedSchema because Spark always sets nullable to true
 // regardless of the JDBC metadata https://github.com/apache/spark/pull/18445
 var expectedSchema = new StructType().add("ID", StringType, nullable = true)
 assert(t.schema === expectedSchema)

From bd57831aab74e1fa3821c7ac2a2321ae032dbc8a Mon Sep 17 00:00:00 2001
From: Josh Soref
Date: Tue, 10 Nov 2020 21:17:18 -0500
Subject: [PATCH 05/13] spelling: external

Signed-off-by: Josh Soref
---
 .../spark/streaming/kinesis/KinesisUtilsPythonHelper.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisUtilsPythonHelper.scala b/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisUtilsPythonHelper.scala
index c89dedd3366d1..0056438c4eefb 100644
--- a/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisUtilsPythonHelper.scala
+++ b/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisUtilsPythonHelper.scala
@@ -46,7 +46,7 @@ private class KinesisUtilsPythonHelper {
 // scalastyle:on
 if (!(stsAssumeRoleArn != null && stsSessionName != null && stsExternalId != null) &&
 !(stsAssumeRoleArn == null && stsSessionName == null && stsExternalId == null)) {
- throw new IllegalArgumentException("stsAssumeRoleArn, stsSessionName, and stsExtenalId " +
+ throw new IllegalArgumentException("stsAssumeRoleArn, stsSessionName, and stsExternalId " +
 "must all be defined or all be null")
 }
 if (awsAccessKeyId == null && awsSecretKey != null) {

From 1cf7aa284a9215b3fc8dfaac228547f29cb7c90f Mon Sep 17 00:00:00 2001
From: Josh Soref
Date: Tue, 10 Nov 2020 21:17:18 -0500
Subject: [PATCH 06/13] spelling: i.e.

Signed-off-by: Josh Soref
---
 .../spark/examples/streaming/JavaKinesisWordCountASL.java | 2 +-
 .../main/python/examples/streaming/kinesis_wordcount_asl.py | 2 +-
 .../spark/examples/streaming/KinesisWordCountASL.scala | 6 +++---
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/external/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java b/external/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java
index d704aeb507518..244873af70de9 100644
--- a/external/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java
+++ b/external/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java
@@ -49,7 +49,7 @@
 *
 * Usage: JavaKinesisWordCountASL [app-name] [stream-name] [endpoint-url] [region-name]
 * [app-name] is the name of the consumer app, used to track the read data in DynamoDB
- * [stream-name] name of the Kinesis stream (ie. mySparkStream)
+ * [stream-name] name of the Kinesis stream (i.e. mySparkStream)
 * [endpoint-url] endpoint of the Kinesis service
 * (e.g. https://kinesis.us-east-1.amazonaws.com)
 *
diff --git a/external/kinesis-asl/src/main/python/examples/streaming/kinesis_wordcount_asl.py b/external/kinesis-asl/src/main/python/examples/streaming/kinesis_wordcount_asl.py
index df8c64e531cfa..06ada13b52399 100644
--- a/external/kinesis-asl/src/main/python/examples/streaming/kinesis_wordcount_asl.py
+++ b/external/kinesis-asl/src/main/python/examples/streaming/kinesis_wordcount_asl.py
@@ -23,7 +23,7 @@
 Usage: kinesis_wordcount_asl.py
 is the name of the consumer app, used to track the read data in DynamoDB
- name of the Kinesis stream (ie. mySparkStream)
+ name of the Kinesis stream (i.e. mySparkStream)
 endpoint of the Kinesis service
 (e.g. https://kinesis.us-east-1.amazonaws.com)
 region name of the Kinesis endpoint (e.g. us-east-1)
diff --git a/external/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala b/external/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala
index bbb6008c2dddf..d6a9160eed98e 100644
--- a/external/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala
+++ b/external/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala
@@ -43,7 +43,7 @@ import org.apache.spark.streaming.kinesis.KinesisInputDStream
 *
 * Usage: KinesisWordCountASL
 * is the name of the consumer app, used to track the read data in DynamoDB
- * name of the Kinesis stream (ie. mySparkStream)
+ * name of the Kinesis stream (i.e. mySparkStream)
 * endpoint of the Kinesis service
 * (e.g. https://kinesis.us-east-1.amazonaws.com)
 *
@@ -167,9 +167,9 @@ object KinesisWordCountASL extends Logging {
 * Usage: KinesisWordProducerASL \
 *
 *
- * is the name of the Kinesis stream (ie. mySparkStream)
+ * is the name of the Kinesis stream (i.e. mySparkStream)
 * is the endpoint of the Kinesis service
- * (ie. https://kinesis.us-east-1.amazonaws.com)
+ * (i.e. https://kinesis.us-east-1.amazonaws.com)
 * is the rate of records per second to put onto the stream
 * is the number of words per record
 *

From bdcd89c8f39864d912f220f4e630e90fe258ccf3 Mon Sep 17 00:00:00 2001
From: Josh Soref
Date: Tue, 10 Nov 2020 21:17:19 -0500
Subject: [PATCH 07/13] spelling: manager

Signed-off-by: Josh Soref
---
 .../scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala
index 3a86352e42d2b..a3956c66d5521 100644
--- a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala
+++ b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala
@@ -577,7 +577,7 @@ class KafkaTestUtils(
 // ensure that logs from all replicas are deleted if delete topic is marked successful
 assert(servers.forall(server => topicAndPartitions.forall(tp =>
 server.getLogManager().getLog(tp).isEmpty)),
- s"topic $topic still exists in log mananger")
+ s"topic $topic still exists in log manager")
 // ensure that topic is removed from all cleaner offsets
 assert(servers.forall(server => topicAndPartitions.forall { tp =>
 val checkpoints = server.getLogManager().liveLogDirs.map { logDir =>

From 1b0870dafe2608d051159c2de9025e3f44a12d8f Mon Sep 17 00:00:00 2001
From: Josh Soref
Date: Tue, 10 Nov 2020 21:17:20 -0500
Subject: [PATCH 08/13] spelling: otherwise

Signed-off-by: Josh Soref
---
 .../org/apache/spark/streaming/kafka010/KafkaRDDSuite.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/external/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/KafkaRDDSuite.scala b/external/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/KafkaRDDSuite.scala
index d6123e16dd238..2053d3655d860 100644
--- a/external/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/KafkaRDDSuite.scala
+++ b/external/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/KafkaRDDSuite.scala
@@ -42,7 +42,7 @@ class KafkaRDDSuite extends SparkFunSuite with BeforeAndAfterAll {
 private val sparkConf = new SparkConf().setMaster("local[4]")
 .setAppName(this.getClass.getSimpleName)
 // Set a timeout of 10 seconds that's going to be used to fetch topics/partitions from kafka.
- // Othewise the poll timeout defaults to 2 minutes and causes test cases to run longer.
+ // Otherwise the poll timeout defaults to 2 minutes and causes test cases to run longer.
 .set("spark.streaming.kafka.consumer.poll.ms", "10000")
 private var sc: SparkContext = _

From 6d3ab968891154e9a40e5f127f9d687814deaa35 Mon Sep 17 00:00:00 2001
From: Josh Soref
Date: Tue, 10 Nov 2020 21:17:21 -0500
Subject: [PATCH 09/13] spelling: perform

Signed-off-by: Josh Soref
---
 examples/src/main/python/ml/train_validation_split.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/src/main/python/ml/train_validation_split.py b/examples/src/main/python/ml/train_validation_split.py
index d4f9184bf576e..5e3dc7b3ec2fa 100644
--- a/examples/src/main/python/ml/train_validation_split.py
+++ b/examples/src/main/python/ml/train_validation_split.py
@@ -17,7 +17,7 @@
 """
 This example demonstrates applying TrainValidationSplit to split data
-and preform model selection.
+and perform model selection.
 Run with:
 bin/spark-submit examples/src/main/python/ml/train_validation_split.py

From 00efb56fad5dd016a696966822f38b839c4c1f83 Mon Sep 17 00:00:00 2001
From: Josh Soref
Date: Tue, 10 Nov 2020 21:17:22 -0500
Subject: [PATCH 10/13] spelling: series

Signed-off-by: Josh Soref
---
 examples/src/main/python/sql/arrow.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/src/main/python/sql/arrow.py b/examples/src/main/python/sql/arrow.py
index 9978e8601449a..74616ede08820 100644
--- a/examples/src/main/python/sql/arrow.py
+++ b/examples/src/main/python/sql/arrow.py
@@ -285,7 +285,7 @@ def asof_join(l, r):
 ser_to_frame_pandas_udf_example(spark)
 print("Running pandas_udf example: Series to Series")
 ser_to_ser_pandas_udf_example(spark)
- print("Running pandas_udf example: Iterator of Series to Iterator of Seires")
+ print("Running pandas_udf example: Iterator of Series to Iterator of Series")
 iter_ser_to_iter_ser_pandas_udf_example(spark)
 print("Running pandas_udf example: Iterator of Multiple Series to Iterator of Series")
 iter_sers_to_iter_ser_pandas_udf_example(spark)

From 3fe0d85c1ae89ce3f230d2383edfa89d19bde126 Mon Sep 17 00:00:00 2001
From: Josh Soref
Date: Tue, 10 Nov 2020 21:17:22 -0500
Subject: [PATCH 11/13] spelling: struct

Signed-off-by: Josh Soref
---
 examples/src/main/python/sql/arrow.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/src/main/python/sql/arrow.py b/examples/src/main/python/sql/arrow.py
index 74616ede08820..a0eba0fbede73 100644
--- a/examples/src/main/python/sql/arrow.py
+++ b/examples/src/main/python/sql/arrow.py
@@ -60,7 +60,7 @@ def func(s1: pd.Series, s2: pd.Series, s3: pd.DataFrame) -> pd.DataFrame:
 s3['col2'] = s1 + s2.str.len()
 return s3
- # Create a Spark DataFrame that has three columns including a sturct column.
+ # Create a Spark DataFrame that has three columns including a struct column.
 df = spark.createDataFrame(
 [[1, "a string", ("a nested string",)]],
 "long_col long, string_col string, struct_col struct")

From 45857d27e1fd85a16502b7ee0d6737515c62f17a Mon Sep 17 00:00:00 2001
From: Josh Soref
Date: Tue, 10 Nov 2020 21:17:23 -0500
Subject: [PATCH 12/13] spelling: transactional

Signed-off-by: Josh Soref
---
 .../sql/kafka010/KafkaContinuousSourceSuite.scala | 4 ++--
 .../sql/kafka010/KafkaMicroBatchSourceSuite.scala | 10 +++++-----
 .../apache/spark/sql/kafka010/KafkaRelationSuite.scala | 4 ++--
 .../org/apache/spark/sql/kafka010/KafkaTestUtils.scala | 2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaContinuousSourceSuite.scala b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaContinuousSourceSuite.scala
index 14dcbeef0d9a3..6801d14d036dd 100644
--- a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaContinuousSourceSuite.scala
+++ b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaContinuousSourceSuite.scala
@@ -33,7 +33,7 @@ class KafkaContinuousSourceSuite extends KafkaSourceSuiteBase with KafkaContinuo
 withTable(table) {
 val topic = newTopic()
 testUtils.createTopic(topic)
- testUtils.withTranscationalProducer { producer =>
+ testUtils.withTransactionalProducer { producer =>
 val df = spark
 .readStream
 .format("kafka")
@@ -99,7 +99,7 @@ class KafkaContinuousSourceSuite extends KafkaSourceSuiteBase with KafkaContinuo
 withTable(table) {
 val topic = newTopic()
 testUtils.createTopic(topic)
- testUtils.withTranscationalProducer { producer =>
+ testUtils.withTransactionalProducer { producer =>
 val df = spark
 .readStream
 .format("kafka")
diff --git a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala
index 9d372b2eaedea..510c0c5bd28a5 100644
--- a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala
+++ b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala
@@ -836,7 +836,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
 val topicPartition = new TopicPartition(topic, 0)
 // The message values are the same as their offsets to make the test easy to follow
- testUtils.withTranscationalProducer { producer =>
+ testUtils.withTransactionalProducer { producer =>
 testStream(mapped)(
 StartStream(Trigger.ProcessingTime(100), clock),
 waitUntilBatchProcessed,
@@ -959,7 +959,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
 val topicPartition = new TopicPartition(topic, 0)
 // The message values are the same as their offsets to make the test easy to follow
- testUtils.withTranscationalProducer { producer =>
+ testUtils.withTransactionalProducer { producer =>
 testStream(mapped)(
 StartStream(Trigger.ProcessingTime(100), clock),
 waitUntilBatchProcessed,
@@ -1050,7 +1050,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
 .load()
 .select($"value".as[String])
- testUtils.withTranscationalProducer { producer =>
+ testUtils.withTransactionalProducer { producer =>
 producer.beginTransaction()
 (0 to 3).foreach { i =>
 producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
@@ -1066,7 +1066,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
 // this case, if we forget to reset `FetchedData._nextOffsetInFetchedData` or
 // `FetchedData._offsetAfterPoll` (See SPARK-25495), the next batch will see incorrect
 // values and return wrong results hence fail the test.
- testUtils.withTranscationalProducer { producer =>
+ testUtils.withTransactionalProducer { producer =>
 producer.beginTransaction()
 (4 to 7).foreach { i =>
 producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
@@ -1779,7 +1779,7 @@ abstract class KafkaSourceSuiteBase extends KafkaSourceTest {
 withTable(table) {
 val topic = newTopic()
 testUtils.createTopic(topic)
- testUtils.withTranscationalProducer { producer =>
+ testUtils.withTransactionalProducer { producer =>
 val df = spark
 .readStream
 .format("kafka")
diff --git a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala
index e5f3a229622e1..6f5dc0bb081ba 100644
--- a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala
+++ b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala
@@ -503,7 +503,7 @@ abstract class KafkaRelationSuiteBase extends QueryTest with SharedSparkSession
 test("read Kafka transactional messages: read_committed") {
 val topic = newTopic()
 testUtils.createTopic(topic)
- testUtils.withTranscationalProducer { producer =>
+ testUtils.withTransactionalProducer { producer =>
 val df = spark
 .read
 .format("kafka")
@@ -552,7 +552,7 @@ abstract class KafkaRelationSuiteBase extends QueryTest with SharedSparkSession
 test("read Kafka transactional messages: read_uncommitted") {
 val topic = newTopic()
 testUtils.createTopic(topic)
- testUtils.withTranscationalProducer { producer =>
+ testUtils.withTransactionalProducer { producer =>
 val df = spark
 .read
 .format("kafka")
diff --git a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala
index a3956c66d5521..c5f3086b38c99 100644
--- a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala
+++ b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala
@@ -539,7 +539,7 @@ class KafkaTestUtils(
 }
 /** Call `f` with a `KafkaProducer` that has initialized transactions. */
- def withTranscationalProducer(f: KafkaProducer[String, String] => Unit): Unit = {
+ def withTransactionalProducer(f: KafkaProducer[String, String] => Unit): Unit = {
 val props = producerConfiguration
 props.put("transactional.id", UUID.randomUUID().toString)
 val producer = new KafkaProducer[String, String](props)

From 21be1afd3c3c3ad87da3fed960d189638022a5c2 Mon Sep 17 00:00:00 2001
From: Josh Soref
Date: Tue, 10 Nov 2020 21:17:24 -0500
Subject: [PATCH 13/13] spelling: whether

Signed-off-by: Josh Soref
---
 .../org/apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala b/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala
index ad6a829fffd0d..00b7b413a964d 100644
--- a/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala
+++ b/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala
@@ -45,7 +45,7 @@ abstract class DatabaseOnDocker {
 val env: Map[String, String]
 /**
- * Wheather or not to use ipc mode for shared memory when starting docker image
+ * Whether or not to use ipc mode for shared memory when starting docker image
 */
 val usesIpc: Boolean