From 931a392f3806b5fb8b10dc88eaf5866601b3762e Mon Sep 17 00:00:00 2001
From: Takeshi Yamamuro
Date: Fri, 6 Mar 2020 09:22:18 +0900
Subject: [PATCH] Fix typos in the SQL docs
---
docs/sql-pyspark-pandas-with-arrow.md | 2 +-
docs/sql-ref-ansi-compliance.md | 2 +-
docs/sql-ref-null-semantics.md | 2 +-
docs/sql-ref-syntax-aux-show-functions.md | 2 +-
docs/sql-ref-syntax-ddl-alter-table.md | 2 +-
docs/sql-ref-syntax-ddl-create-table-like.md | 2 +-
docs/sql-ref-syntax-qry-select-limit.md | 2 +-
7 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/docs/sql-pyspark-pandas-with-arrow.md b/docs/sql-pyspark-pandas-with-arrow.md
index 63ba0ba5e1b9..e8abb9fed1b2 100644
--- a/docs/sql-pyspark-pandas-with-arrow.md
+++ b/docs/sql-pyspark-pandas-with-arrow.md
@@ -91,7 +91,7 @@ specify the type hints of `pandas.Series` and `pandas.DataFrame` as below:
-In the following sections, it describes the cominations of the supported type hints. For simplicity,
+The following sections describe the combinations of the supported type hints. For simplicity,
`pandas.DataFrame` variant is omitted.
### Series to Series
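As a quick illustration of the Series-to-Series case the heading above refers to, here is a minimal PySpark sketch of a type-hinted Pandas UDF (assuming Spark 3.0+ with `pyarrow` installed; the session, function name, and column are made up for the example):

{% highlight python %}
import pandas as pd
from pyspark.sql import SparkSession
from pyspark.sql.functions import pandas_udf

spark = SparkSession.builder.getOrCreate()

# Series-to-Series Pandas UDF declared through type hints: it receives one
# pandas.Series per batch and returns a pandas.Series of the same length.
@pandas_udf("long")
def plus_one(s: pd.Series) -> pd.Series:
    return s + 1

# "id" is just the column produced by spark.range(), used for illustration.
spark.range(5).select(plus_one("id").alias("id_plus_one")).show()
{% endhighlight %}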
diff --git a/docs/sql-ref-ansi-compliance.md b/docs/sql-ref-ansi-compliance.md
index 267184a1cb5f..27e60b4bb8e3 100644
--- a/docs/sql-ref-ansi-compliance.md
+++ b/docs/sql-ref-ansi-compliance.md
@@ -60,7 +60,7 @@ The following subsections present behaviour changes in arithmetic operations, ty
### Arithmetic Operations
In Spark SQL, arithmetic operations performed on numeric types (with the exception of decimal) are not checked for overflows by default.
-This means that in case an operation causes overflows, the result is the same that the same operation returns in a Java/Scala program (e.g., if the sum of 2 integers is higher than the maximum value representable, the result is a negative number).
+This means that if an operation causes an overflow, the result is the same as that returned by the corresponding operation in a Java/Scala program (e.g., if the sum of 2 integers is higher than the maximum value representable, the result is a negative number).
On the other hand, Spark SQL returns null for decimal overflows.
When `spark.sql.ansi.enabled` is set to `true` and an overflow occurs in numeric and interval arithmetic operations, it throws an arithmetic exception at runtime.
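A small PySpark sketch of the two behaviours described above may help; the session setup is assumed, and `spark.sql.ansi.enabled` is the config named in the doc:

{% highlight python %}
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

# Default behaviour: no overflow check, the INT addition wraps around
# exactly like the corresponding Java/Scala operation would.
spark.conf.set("spark.sql.ansi.enabled", "false")
spark.sql("SELECT CAST(2147483647 AS INT) + CAST(1 AS INT) AS wrapped").show()
# -> -2147483648

# With ANSI mode on, the same expression fails at runtime with an
# arithmetic exception instead of wrapping.
spark.conf.set("spark.sql.ansi.enabled", "true")
try:
    spark.sql("SELECT CAST(2147483647 AS INT) + CAST(1 AS INT) AS boom").show()
except Exception as err:  # surfaced to Python as a wrapped JVM exception
    print("overflow raised:", type(err).__name__)
{% endhighlight %}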
diff --git a/docs/sql-ref-null-semantics.md b/docs/sql-ref-null-semantics.md
index 3cbc15c600ce..37b4081d6b27 100644
--- a/docs/sql-ref-null-semantics.md
+++ b/docs/sql-ref-null-semantics.md
@@ -605,7 +605,7 @@ SELECT name, age FROM unknown_age;
In Spark, EXISTS and NOT EXISTS expressions are allowed inside a WHERE clause.
These are boolean expressions which return either `TRUE` or
`FALSE`. In other words, EXISTS is a membership condition and returns `TRUE`
-when the subquery it refers to returns one or more rows. Similary, NOT EXISTS
+when the subquery it refers to returns one or more rows. Similarly, NOT EXISTS
is a non-membership condition and returns TRUE when no rows or zero rows are
returned from the subquery.
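To make the EXISTS / NOT EXISTS wording above concrete, here is a hedged PySpark sketch; the `person` and `orders` temp views are throwaway names invented only for the illustration:

{% highlight python %}
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

# Throwaway tables, only for the illustration.
spark.createDataFrame([(1, "Alice"), (2, "Bob")], ["id", "name"]) \
     .createOrReplaceTempView("person")
spark.createDataFrame([(1, 100)], ["person_id", "amount"]) \
     .createOrReplaceTempView("orders")

# EXISTS keeps the rows whose correlated subquery returns at least one row.
spark.sql("""
    SELECT name FROM person p
    WHERE EXISTS (SELECT 1 FROM orders o WHERE o.person_id = p.id)
""").show()   # Alice

# NOT EXISTS keeps the rows whose correlated subquery returns no rows.
spark.sql("""
    SELECT name FROM person p
    WHERE NOT EXISTS (SELECT 1 FROM orders o WHERE o.person_id = p.id)
""").show()   # Bob
{% endhighlight %}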
diff --git a/docs/sql-ref-syntax-aux-show-functions.md b/docs/sql-ref-syntax-aux-show-functions.md
index 701d427039aa..d6f9df9896af 100644
--- a/docs/sql-ref-syntax-aux-show-functions.md
+++ b/docs/sql-ref-syntax-aux-show-functions.md
@@ -22,7 +22,7 @@ license: |
### Description
Returns the list of functions after applying an optional regex pattern.
Given number of functions supported by Spark is quite large, this statement
-in conjuction with [describe function](sql-ref-syntax-aux-describe-function.html)
+in conjunction with [describe function](sql-ref-syntax-aux-describe-function.html)
may be used to quickly find the function and understand its usage. The `LIKE`
clause is optional and supported only for compatibility with other systems.
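A short sketch of the SHOW FUNCTIONS / DESCRIBE FUNCTION pairing mentioned above, run through PySpark; the `'trim*'` pattern is an arbitrary example, not something from the doc:

{% highlight python %}
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

# List only the functions whose name matches the pattern; the LIKE
# keyword itself is optional.
spark.sql("SHOW FUNCTIONS LIKE 'trim*'").show()

# DESCRIBE FUNCTION can then explain one of the matches in detail.
spark.sql("DESCRIBE FUNCTION trim").show(truncate=False)
{% endhighlight %}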
diff --git a/docs/sql-ref-syntax-ddl-alter-table.md b/docs/sql-ref-syntax-ddl-alter-table.md
index a921478daa47..373fa8d8940b 100644
--- a/docs/sql-ref-syntax-ddl-alter-table.md
+++ b/docs/sql-ref-syntax-ddl-alter-table.md
@@ -260,7 +260,7 @@ ALTER TABLE dbx.tab1 PARTITION (a='1', b='2') SET LOCATION '/path/to/part/ways'
-- SET SERDE/ SERDE Properties
ALTER TABLE test_tab SET SERDE 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
-ALTER TABLE dbx.tab1 SET SERDE 'org.apache.madoop' WITH SERDEPROPERTIES ('k' = 'v', 'kay' = 'vee')
+ALTER TABLE dbx.tab1 SET SERDE 'org.apache.hadoop' WITH SERDEPROPERTIES ('k' = 'v', 'kay' = 'vee')
--SET TABLE PROPERTIES
ALTER TABLE dbx.tab1 SET TBLPROPERTIES ('winner' = 'loser')
diff --git a/docs/sql-ref-syntax-ddl-create-table-like.md b/docs/sql-ref-syntax-ddl-create-table-like.md
index b7d7bdd1d980..f49fd7fb24c9 100644
--- a/docs/sql-ref-syntax-ddl-create-table-like.md
+++ b/docs/sql-ref-syntax-ddl-create-table-like.md
@@ -74,7 +74,7 @@ USING data_source
### Examples
{% highlight sql %}
---Create table using an exsisting table
+--Create table using an existing table
CREATE TABLE Student_Dupli like Student;
--Create table like using a data source
diff --git a/docs/sql-ref-syntax-qry-select-limit.md b/docs/sql-ref-syntax-qry-select-limit.md
index 2b9999cc4078..06925e6ee353 100644
--- a/docs/sql-ref-syntax-qry-select-limit.md
+++ b/docs/sql-ref-syntax-qry-select-limit.md
@@ -20,7 +20,7 @@ license: |
---
The LIMIT clause is used to constrain the number of rows returned by
the [SELECT](sql-ref-syntax-qry-select.html) statement. In general, this clause
-is used in conjuction with [ORDER BY](sql-ref-syntax-qry-select-orderby.html) to
+is used in conjunction with [ORDER BY](sql-ref-syntax-qry-select-orderby.html) to
ensure that the results are deterministic.
### Syntax
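Finally, a minimal sketch of the LIMIT-with-ORDER-BY point made above; the `nums` view is a made-up name used only for the example:

{% highlight python %}
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

# A throwaway view just for the illustration.
spark.range(100).createOrReplaceTempView("nums")

# LIMIT alone does not pin down *which* rows come back; pairing it with
# ORDER BY makes the five returned rows deterministic.
spark.sql("SELECT id FROM nums ORDER BY id DESC LIMIT 5").show()
{% endhighlight %}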