diff --git a/README.md b/README.md
index 48688e20..078f595c 100644
--- a/README.md
+++ b/README.md
@@ -20,13 +20,13 @@ You can link against this library in your program at the following coordinates:
 ```
 groupId: com.databricks
 artifactId: spark-xml_2.10
-version: 0.3.3
+version: 0.3.4
 ```
 ### Scala 2.11
 ```
 groupId: com.databricks
 artifactId: spark-xml_2.11
-version: 0.3.3
+version: 0.3.4
 ```
 
 ## Using with Spark shell
@@ -34,12 +34,12 @@ This package can be added to Spark using the `--packages` command line option.
 
 ### Spark compiled with Scala 2.10
 ```
-$SPARK_HOME/bin/spark-shell --packages com.databricks:spark-xml_2.10:0.3.3
+$SPARK_HOME/bin/spark-shell --packages com.databricks:spark-xml_2.10:0.3.4
 ```
 
 ### Spark compiled with Scala 2.11
 ```
-$SPARK_HOME/bin/spark-shell --packages com.databricks:spark-xml_2.11:0.3.3
+$SPARK_HOME/bin/spark-shell --packages com.databricks:spark-xml_2.11:0.3.4
 ```
 
 ## Features
@@ -436,7 +436,7 @@ Automatically infer schema (data types)
 
 ```R
 library(SparkR)
-Sys.setenv('SPARKR_SUBMIT_ARGS'='"--packages" "com.databricks:spark-xml_2.10:0.3.3" "sparkr-shell"')
+Sys.setenv('SPARKR_SUBMIT_ARGS'='"--packages" "com.databricks:spark-xml_2.10:0.3.4" "sparkr-shell"')
 sqlContext <- sparkRSQL.init(sc)
 
 df <- read.df(sqlContext, "books.xml", source = "com.databricks.spark.xml", rowTag = "book")
@@ -449,7 +449,7 @@ You can manually specify schema:
 
 ```R
 library(SparkR)
-Sys.setenv('SPARKR_SUBMIT_ARGS'='"--packages" "com.databricks:spark-csv_2.10:0.3.3" "sparkr-shell"')
+Sys.setenv('SPARKR_SUBMIT_ARGS'='"--packages" "com.databricks:spark-xml_2.10:0.3.4" "sparkr-shell"')
 sqlContext <- sparkRSQL.init(sc)
 customSchema <- structType(
   structField("@id", "string"),
diff --git a/build.sbt b/build.sbt
index 36037d43..ca008e3f 100755
--- a/build.sbt
+++ b/build.sbt
@@ -1,6 +1,6 @@
 name := "spark-xml"
 
-version := "0.3.3"
+version := "0.3.4"
 
 organization := "com.databricks"
 