From e797c30b94fb191b844ab7ef247a84baf5260379 Mon Sep 17 00:00:00 2001
From: Cheng Lian
Date: Mon, 18 Jul 2016 14:00:33 +0800
Subject: [PATCH 1/2] Minor Scala example update

---
 docs/sql-programming-guide.md                            | 5 ++---
 .../org/apache/spark/examples/sql/SparkSqlExample.scala  | 6 +++---
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/docs/sql-programming-guide.md b/docs/sql-programming-guide.md
index 4413fdd2f637..f27ba4986022 100644
--- a/docs/sql-programming-guide.md
+++ b/docs/sql-programming-guide.md
@@ -1879,9 +1879,8 @@ Spark SQL and DataFrames support the following data types:
 All data types of Spark SQL are located in the package `org.apache.spark.sql.types`.
 You can access them by doing
-{% highlight scala %}
-import org.apache.spark.sql.types._
-{% endhighlight %}
+
+{% include_example data_types scala/org/apache/spark/examples/sql/SparkSqlExample.scala %}

diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/SparkSqlExample.scala b/examples/src/main/scala/org/apache/spark/examples/sql/SparkSqlExample.scala
index cf3f86426719..60e72d41af87 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/SparkSqlExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/SparkSqlExample.scala
@@ -25,9 +25,9 @@ import org.apache.spark.sql.Row
 import org.apache.spark.sql.SparkSession
 // $example off:init_session$
 // $example on:programmatic_schema$
-import org.apache.spark.sql.types.StringType
-import org.apache.spark.sql.types.StructField
-import org.apache.spark.sql.types.StructType
+// $example on:data_types$
+import org.apache.spark.sql.types._
+// $example off:data_types$
 // $example off:programmatic_schema$
 
 object SparkSqlExample {

From 927c46a6f7f641bf9959c2d82e7424d7fa2d2d0a Mon Sep 17 00:00:00 2001
From: Cheng Lian
Date: Mon, 18 Jul 2016 17:44:36 +0800
Subject: [PATCH 2/2] Renames Scala/Java example files

---
 docs/sql-programming-guide.md                 | 54 +++++++++----------
 ...ple.java => JavaSQLDataSourceExample.java} |  2 +-
 ...lExample.java => JavaSparkSQLExample.java} |  2 +-
 ...ample.scala => SQLDataSourceExample.scala} |  2 +-
 ...SqlExample.scala => SparkSQLExample.scala} |  2 +-
 5 files changed, 31 insertions(+), 31 deletions(-)
 rename examples/src/main/java/org/apache/spark/examples/sql/{JavaSqlDataSourceExample.java => JavaSQLDataSourceExample.java} (99%)
 rename examples/src/main/java/org/apache/spark/examples/sql/{JavaSparkSqlExample.java => JavaSparkSQLExample.java} (99%)
 rename examples/src/main/scala/org/apache/spark/examples/sql/{SqlDataSourceExample.scala => SQLDataSourceExample.scala} (99%)
 rename examples/src/main/scala/org/apache/spark/examples/sql/{SparkSqlExample.scala => SparkSQLExample.scala} (99%)

diff --git a/docs/sql-programming-guide.md b/docs/sql-programming-guide.md
index f27ba4986022..71f3ee40a3af 100644
--- a/docs/sql-programming-guide.md
+++ b/docs/sql-programming-guide.md
@@ -65,14 +65,14 @@ Throughout this document, we will often refer to Scala/Java Datasets of `Row`s a
 The entry point into all functionality in Spark is the [`SparkSession`](api/scala/index.html#org.apache.spark.sql.SparkSession) class. To create a basic `SparkSession`, just use `SparkSession.builder()`:
 
-{% include_example init_session scala/org/apache/spark/examples/sql/SparkSqlExample.scala %}
+{% include_example init_session scala/org/apache/spark/examples/sql/SparkSQLExample.scala %}
 
 </div>
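For readers skimming the patch, the `init_session` snippet referenced here boils down to something like the following sketch; the application name and config key are illustrative placeholders, not part of the patch:

{% highlight scala %}
import org.apache.spark.sql.SparkSession

val spark = SparkSession
  .builder()
  .appName("Spark SQL basic example")               // illustrative app name
  .config("spark.some.config.option", "some-value") // placeholder config key
  .getOrCreate()

// For implicit conversions, e.g. converting RDDs to DataFrames
import spark.implicits._
{% endhighlight %}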
The entry point into all functionality in Spark is the [`SparkSession`](api/java/index.html#org.apache.spark.sql.SparkSession) class. To create a basic `SparkSession`, just use `SparkSession.builder()`: -{% include_example init_session java/org/apache/spark/examples/sql/JavaSparkSqlExample.java %} +{% include_example init_session java/org/apache/spark/examples/sql/JavaSparkSQLExample.java %}
@@ -105,7 +105,7 @@ from a Hive table, or from [Spark data sources](#data-sources). As an example, the following creates a DataFrame based on the content of a JSON file: -{% include_example create_df scala/org/apache/spark/examples/sql/SparkSqlExample.scala %} +{% include_example create_df scala/org/apache/spark/examples/sql/SparkSQLExample.scala %}
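A minimal sketch of what a `create_df` snippet like the one relinked above does, assuming the `people.json` sample data shipped under `examples/src/main/resources` in the Spark source tree and the `spark` session from the previous sketch:

{% highlight scala %}
val df = spark.read.json("examples/src/main/resources/people.json")

// Displays the content of the DataFrame to stdout
df.show()
{% endhighlight %}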
@@ -114,7 +114,7 @@ from a Hive table, or from [Spark data sources](#data-sources). As an example, the following creates a DataFrame based on the content of a JSON file: -{% include_example create_df java/org/apache/spark/examples/sql/JavaSparkSqlExample.java %} +{% include_example create_df java/org/apache/spark/examples/sql/JavaSparkSQLExample.java %}
@@ -155,7 +155,7 @@ Here we include some basic examples of structured data processing using Datasets
-{% include_example untyped_ops scala/org/apache/spark/examples/sql/SparkSqlExample.scala %} +{% include_example untyped_ops scala/org/apache/spark/examples/sql/SparkSQLExample.scala %} For a complete list of the types of operations that can be performed on a Dataset refer to the [API Documentation](api/scala/index.html#org.apache.spark.sql.Dataset). @@ -164,7 +164,7 @@ In addition to simple column references and expressions, Datasets also have a ri
-{% include_example untyped_ops java/org/apache/spark/examples/sql/JavaSparkSqlExample.java %} +{% include_example untyped_ops java/org/apache/spark/examples/sql/JavaSparkSQLExample.java %} For a complete list of the types of operations that can be performed on a Dataset refer to the [API Documentation](api/java/org/apache/spark/sql/Dataset.html). @@ -249,13 +249,13 @@ In addition to simple column references and expressions, DataFrames also have a
The `sql` function on a `SparkSession` enables applications to run SQL queries programmatically and returns the result as a `DataFrame`. -{% include_example run_sql scala/org/apache/spark/examples/sql/SparkSqlExample.scala %} +{% include_example run_sql scala/org/apache/spark/examples/sql/SparkSQLExample.scala %}
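The `run_sql` snippet being relinked amounts to roughly the following, reusing the `df` DataFrame from the JSON sketch above:

{% highlight scala %}
// Register the DataFrame as a SQL temporary view
df.createOrReplaceTempView("people")

val sqlDF = spark.sql("SELECT * FROM people")
sqlDF.show()
{% endhighlight %}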
The `sql` function on a `SparkSession` enables applications to run SQL queries programmatically and returns the result as a `Dataset`. -{% include_example run_sql java/org/apache/spark/examples/sql/JavaSparkSqlExample.java %} +{% include_example run_sql java/org/apache/spark/examples/sql/JavaSparkSQLExample.java %}
@@ -287,11 +287,11 @@ the bytes back into an object.
-{% include_example create_ds scala/org/apache/spark/examples/sql/SparkSqlExample.scala %} +{% include_example create_ds scala/org/apache/spark/examples/sql/SparkSQLExample.scala %}
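For context, a `create_ds` snippet typically looks like the sketch below; it assumes `import spark.implicits._` is in scope so that encoders for case classes and common types are available:

{% highlight scala %}
case class Person(name: String, age: Long)

// Encoders are created automatically for case classes
val caseClassDS = Seq(Person("Andy", 32)).toDS()
caseClassDS.show()

// Encoders for most common types come from spark.implicits._
val primitiveDS = Seq(1, 2, 3).toDS()
primitiveDS.map(_ + 1).collect() // returns Array(2, 3, 4)
{% endhighlight %}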
-{% include_example create_ds java/org/apache/spark/examples/sql/JavaSparkSqlExample.java %} +{% include_example create_ds java/org/apache/spark/examples/sql/JavaSparkSQLExample.java %}
@@ -318,7 +318,7 @@ reflection and become the names of the columns. Case classes can also be nested types such as `Seq`s or `Array`s. This RDD can be implicitly converted to a DataFrame and then be registered as a table. Tables can be used in subsequent SQL statements. -{% include_example schema_inferring scala/org/apache/spark/examples/sql/SparkSqlExample.scala %} +{% include_example schema_inferring scala/org/apache/spark/examples/sql/SparkSQLExample.scala %}
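A sketch of reflection-based schema inference along the lines of the `schema_inferring` snippet, assuming the `people.txt` sample data (lines like "Michael, 29") and the `Person` case class from the previous sketch:

{% highlight scala %}
// Create an RDD of Person objects from a text file and convert it to a DataFrame
val peopleDF = spark.sparkContext
  .textFile("examples/src/main/resources/people.txt")
  .map(_.split(","))
  .map(attrs => Person(attrs(0), attrs(1).trim.toInt))
  .toDF()

// Register the DataFrame as a temporary view so it can be queried with SQL
peopleDF.createOrReplaceTempView("people")
val teenagersDF = spark.sql("SELECT name, age FROM people WHERE age BETWEEN 13 AND 19")
teenagersDF.map(t => "Name: " + t(0)).show()
{% endhighlight %}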
@@ -330,7 +330,7 @@ does not support JavaBeans that contain `Map` field(s). Nested JavaBeans and `Li fields are supported though. You can create a JavaBean by creating a class that implements Serializable and has getters and setters for all of its fields. -{% include_example schema_inferring java/org/apache/spark/examples/sql/JavaSparkSqlExample.java %} +{% include_example schema_inferring java/org/apache/spark/examples/sql/JavaSparkSQLExample.java %}
@@ -385,7 +385,7 @@ by `SparkSession`. For example: -{% include_example programmatic_schema scala/org/apache/spark/examples/sql/SparkSqlExample.scala %} +{% include_example programmatic_schema scala/org/apache/spark/examples/sql/SparkSQLExample.scala %}
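The `programmatic_schema` path is the one touched by the wildcard import in patch 1: building a `StructType` by hand pulls in several types from `org.apache.spark.sql.types`. A sketch, again assuming the `people.txt` sample:

{% highlight scala %}
import org.apache.spark.sql.Row
import org.apache.spark.sql.types._

// The schema is encoded in a string; build a StructType from it
val schemaString = "name age"
val fields = schemaString.split(" ")
  .map(fieldName => StructField(fieldName, StringType, nullable = true))
val schema = StructType(fields)

// Convert the records to Rows, then apply the schema
val rowRDD = spark.sparkContext
  .textFile("examples/src/main/resources/people.txt")
  .map(_.split(","))
  .map(attrs => Row(attrs(0), attrs(1).trim))
val peopleDF = spark.createDataFrame(rowRDD, schema)
{% endhighlight %}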
@@ -403,7 +403,7 @@ by `SparkSession`. For example: -{% include_example programmatic_schema java/org/apache/spark/examples/sql/JavaSparkSqlExample.java %} +{% include_example programmatic_schema java/org/apache/spark/examples/sql/JavaSparkSQLExample.java %}
@@ -472,11 +472,11 @@ In the simplest form, the default data source (`parquet` unless otherwise config
-{% include_example generic_load_save_functions scala/org/apache/spark/examples/sql/SqlDataSourceExample.scala %} +{% include_example generic_load_save_functions scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala %}
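A sketch of the generic load/save calls covered by `generic_load_save_functions`, assuming the `users.parquet` sample file from the Spark source tree (the output path is illustrative):

{% highlight scala %}
// With no format specified, the default data source (parquet) is used
val usersDF = spark.read.load("examples/src/main/resources/users.parquet")
usersDF.select("name", "favorite_color").write.save("namesAndFavColors.parquet")
{% endhighlight %}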
-{% include_example generic_load_save_functions java/org/apache/spark/examples/sql/JavaSqlDataSourceExample.java %} +{% include_example generic_load_save_functions java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java %}
@@ -507,11 +507,11 @@ using this syntax.
-{% include_example manual_load_options scala/org/apache/spark/examples/sql/SqlDataSourceExample.scala %} +{% include_example manual_load_options scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala %}
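Manually specifying the source format, as `manual_load_options` does, looks roughly like this (output path again illustrative):

{% highlight scala %}
val peopleDF = spark.read.format("json").load("examples/src/main/resources/people.json")
peopleDF.select("name", "age").write.format("parquet").save("namesAndAges.parquet")
{% endhighlight %}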
-{% include_example manual_load_options java/org/apache/spark/examples/sql/JavaSqlDataSourceExample.java %} +{% include_example manual_load_options java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java %}
@@ -538,11 +538,11 @@ file directly with SQL.
-{% include_example direct_sql scala/org/apache/spark/examples/sql/SqlDataSourceExample.scala %} +{% include_example direct_sql scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala %}
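Running SQL directly over a file, as the `direct_sql` snippet does, names the backquoted file path with its format as the qualifier:

{% highlight scala %}
val sqlDF =
  spark.sql("SELECT * FROM parquet.`examples/src/main/resources/users.parquet`")
{% endhighlight %}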
-{% include_example direct_sql java/org/apache/spark/examples/sql/JavaSqlDataSourceExample.java %} +{% include_example direct_sql java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java %}
@@ -633,11 +633,11 @@ Using the data from the above example:
-{% include_example basic_parquet_example scala/org/apache/spark/examples/sql/SqlDataSourceExample.scala %} +{% include_example basic_parquet_example scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala %}
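A sketch of the parquet round trip in `basic_parquet_example`, reusing the `peopleDF` from the sketches above (the implicits import supplies the string encoder for the final `map`):

{% highlight scala %}
// DataFrames can be saved as parquet files, maintaining the schema information
peopleDF.write.parquet("people.parquet")

// Read in the parquet file created above; the result is also a DataFrame
val parquetFileDF = spark.read.parquet("people.parquet")
parquetFileDF.createOrReplaceTempView("parquetFile")
val namesDF = spark.sql("SELECT name FROM parquetFile WHERE age BETWEEN 13 AND 19")
namesDF.map(attrs => "Name: " + attrs(0)).show()
{% endhighlight %}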
-{% include_example basic_parquet_example java/org/apache/spark/examples/sql/JavaSqlDataSourceExample.java %} +{% include_example basic_parquet_example java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java %}
@@ -766,11 +766,11 @@ turned it off by default starting from 1.5.0. You may enable it by
-{% include_example schema_merging scala/org/apache/spark/examples/sql/SqlDataSourceExample.scala %} +{% include_example schema_merging scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala %}
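Schema merging, as exercised by the `schema_merging` snippet, can be sketched like this (the `data/test_table` paths are illustrative):

{% highlight scala %}
// Write two partitions of a partitioned table with different but compatible schemas
val squaresDF = spark.sparkContext.makeRDD(1 to 5).map(i => (i, i * i)).toDF("value", "square")
squaresDF.write.parquet("data/test_table/key=1")

val cubesDF = spark.sparkContext.makeRDD(6 to 10).map(i => (i, i * i * i)).toDF("value", "cube")
cubesDF.write.parquet("data/test_table/key=2")

// Schema merging is off by default; enable it per read via the mergeSchema option
val mergedDF = spark.read.option("mergeSchema", "true").parquet("data/test_table")
mergedDF.printSchema()
{% endhighlight %}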
-{% include_example schema_merging java/org/apache/spark/examples/sql/JavaSqlDataSourceExample.java %} +{% include_example schema_merging java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java %}
@@ -973,7 +973,7 @@ Note that the file that is offered as _a json file_ is not a typical JSON file. line must contain a separate, self-contained valid JSON object. As a consequence, a regular multi-line JSON file will most often fail. -{% include_example json_dataset scala/org/apache/spark/examples/sql/SqlDataSourceExample.scala %} +{% include_example json_dataset scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala %}
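The one-JSON-object-per-line requirement is worth illustrating; a sketch along the lines of the `json_dataset` snippet:

{% highlight scala %}
// Spark infers the schema of a JSON dataset automatically
val peopleDF = spark.read.json("examples/src/main/resources/people.json")
peopleDF.printSchema()

// A DataFrame can also be created from an RDD[String] holding one JSON object per element
val otherPeopleRDD = spark.sparkContext.makeRDD(
  """{"name":"Yin","address":{"city":"Columbus","state":"Ohio"}}""" :: Nil)
val otherPeople = spark.read.json(otherPeopleRDD)
otherPeople.show()
{% endhighlight %}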
@@ -985,7 +985,7 @@ Note that the file that is offered as _a json file_ is not a typical JSON file. line must contain a separate, self-contained valid JSON object. As a consequence, a regular multi-line JSON file will most often fail. -{% include_example json_dataset java/org/apache/spark/examples/sql/JavaSqlDataSourceExample.java %} +{% include_example json_dataset java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java %}
@@ -1880,7 +1880,7 @@ Spark SQL and DataFrames support the following data types: All data types of Spark SQL are located in the package `org.apache.spark.sql.types`. You can access them by doing -{% include_example data_types scala/org/apache/spark/examples/sql/SparkSqlExample.scala %} +{% include_example data_types scala/org/apache/spark/examples/sql/SparkSQLExample.scala %}
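This is the hunk that motivated patch 1: the wildcard import brings in every type in the package, so the `data_types` snippet can build schemas by hand, e.g.:

{% highlight scala %}
import org.apache.spark.sql.types._

val schema = StructType(
  StructField("name", StringType, nullable = true) ::
  StructField("age", IntegerType, nullable = true) :: Nil)
{% endhighlight %}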
diff --git a/examples/src/main/java/org/apache/spark/examples/sql/JavaSqlDataSourceExample.java b/examples/src/main/java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java
similarity index 99%
rename from examples/src/main/java/org/apache/spark/examples/sql/JavaSqlDataSourceExample.java
rename to examples/src/main/java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java
index 4db5e1b0af83..2b94b9f114e2 100644
--- a/examples/src/main/java/org/apache/spark/examples/sql/JavaSqlDataSourceExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java
@@ -35,7 +35,7 @@
 // $example off:basic_parquet_example$
 import org.apache.spark.sql.SparkSession;
 
-public class JavaSqlDataSourceExample {
+public class JavaSQLDataSourceExample {
 
   // $example on:schema_merging$
   public static class Square implements Serializable {

diff --git a/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSqlExample.java b/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQLExample.java
similarity index 99%
rename from examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSqlExample.java
rename to examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQLExample.java
index 586d6e3a3e47..afc18078d471 100644
--- a/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSqlExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQLExample.java
@@ -60,7 +60,7 @@
 import static org.apache.spark.sql.functions.col;
 // $example off:untyped_ops$
 
-public class JavaSparkSqlExample {
+public class JavaSparkSQLExample {
   // $example on:create_ds$
   public static class Person implements Serializable {
     private String name;

diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/SqlDataSourceExample.scala b/examples/src/main/scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala
similarity index 99%
rename from examples/src/main/scala/org/apache/spark/examples/sql/SqlDataSourceExample.scala
rename to examples/src/main/scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala
index 61dea6ad2ca9..0caba12af0bd 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/SqlDataSourceExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala
@@ -18,7 +18,7 @@ package org.apache.spark.examples.sql
 
 import org.apache.spark.sql.SparkSession
 
-object SqlDataSourceExample {
+object SQLDataSourceExample {
 
   case class Person(name: String, age: Long)

diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/SparkSqlExample.scala b/examples/src/main/scala/org/apache/spark/examples/sql/SparkSQLExample.scala
similarity index 99%
rename from examples/src/main/scala/org/apache/spark/examples/sql/SparkSqlExample.scala
rename to examples/src/main/scala/org/apache/spark/examples/sql/SparkSQLExample.scala
index 60e72d41af87..952c074d0345 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/SparkSqlExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/SparkSQLExample.scala
@@ -30,7 +30,7 @@ import org.apache.spark.sql.types._
 // $example off:data_types$
 // $example off:programmatic_schema$
 
-object SparkSqlExample {
+object SparkSQLExample {
   // $example on:create_ds$
   // Note: Case classes in Scala 2.10 can support only up to 22 fields. To work around this limit,