diff --git a/core/pom.xml b/core/pom.xml
index 9258a856028a..15275dc74ea4 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -381,9 +381,15 @@
placed in the "provided" scope, rather than the "compile" scope, and NoClassDefFoundError exceptions are handled
when the user has not explicitly compiled with the Hive module.
-->
+    <dependency>
+      <groupId>${hive.group}</groupId>
+      <artifactId>hive-common</artifactId>
+      <scope>provided</scope>
+    </dependency>
     <dependency>
       <groupId>${hive.group}</groupId>
       <artifactId>hive-exec</artifactId>
+      <classifier>${hive.classifier}</classifier>
       <scope>provided</scope>
@@ -391,6 +397,26 @@
       <artifactId>hive-metastore</artifactId>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>${hive.group}</groupId>
+      <artifactId>hive-serde</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-storage-api</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive.shims</groupId>
+      <artifactId>hive-shims-common</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive.shims</groupId>
+      <artifactId>hive-shims-0.23</artifactId>
+      <scope>provided</scope>
+    </dependency>
     <dependency>
       <groupId>org.apache.thrift</groupId>
       <artifactId>libthrift</artifactId>
diff --git a/core/src/main/scala/org/apache/spark/deploy/security/HiveDelegationTokenProvider.scala b/core/src/main/scala/org/apache/spark/deploy/security/HiveDelegationTokenProvider.scala
index ece5ce79c650..7ae1f247a14a 100644
--- a/core/src/main/scala/org/apache/spark/deploy/security/HiveDelegationTokenProvider.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/security/HiveDelegationTokenProvider.scala
@@ -25,7 +25,7 @@ import scala.util.control.NonFatal
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier
import org.apache.hadoop.hive.conf.HiveConf
-import org.apache.hadoop.hive.ql.metadata.Hive
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient
import org.apache.hadoop.io.Text
import org.apache.hadoop.security.{Credentials, UserGroupInformation}
import org.apache.hadoop.security.token.Token
@@ -78,6 +78,7 @@ private[security] class HiveDelegationTokenProvider
hadoopConf: Configuration,
sparkConf: SparkConf,
creds: Credentials): Option[Long] = {
+ var metaStoreClient: HiveMetaStoreClient = null
try {
val conf = hiveConf(hadoopConf)
@@ -92,8 +93,8 @@ private[security] class HiveDelegationTokenProvider
s"$principal at $metastoreUri")
doAsRealUser {
- val hive = Hive.get(conf, classOf[HiveConf])
- val tokenStr = hive.getDelegationToken(currentUser.getUserName(), principal)
+ metaStoreClient = new HiveMetaStoreClient(conf.asInstanceOf[HiveConf])
+ val tokenStr = metaStoreClient.getDelegationToken(currentUser.getUserName, principal)
val hive2Token = new Token[DelegationTokenIdentifier]()
hive2Token.decodeFromUrlString(tokenStr)
@@ -111,7 +112,9 @@ private[security] class HiveDelegationTokenProvider
None
} finally {
Utils.tryLogNonFatalError {
- Hive.closeCurrent()
+ if (metaStoreClient != null) {
+ metaStoreClient.close()
+ }
}
}
}
diff --git a/dev/deps/spark-deps-hadoop-2.6 b/dev/deps/spark-deps-hadoop-2.6
index c3d1dd444b50..25831776e757 100644
--- a/dev/deps/spark-deps-hadoop-2.6
+++ b/dev/deps/spark-deps-hadoop-2.6
@@ -1,14 +1,11 @@
-JavaEWAH-0.3.2.jar
RoaringBitmap-0.5.11.jar
ST4-4.0.4.jar
activation-1.1.1.jar
aircompressor-0.8.jar
-antlr-2.7.7.jar
-antlr-runtime-3.4.jar
+antlr-runtime-3.5.2.jar
antlr4-runtime-4.7.jar
aopalliance-1.0.jar
aopalliance-repackaged-2.4.0-b34.jar
-apache-log4j-extras-1.2.17.jar
apacheds-i18n-2.0.0-M15.jar
apacheds-kerberos-codec-2.0.0-M15.jar
api-asn1-api-1.0.0-M20.jar
@@ -18,6 +15,7 @@ arrow-format-0.8.0.jar
arrow-memory-0.8.0.jar
arrow-vector-0.8.0.jar
automaton-1.11-8.jar
+avatica-1.8.0.jar
avro-1.7.7.jar
avro-ipc-1.7.7.jar
avro-mapred-1.7.7-hadoop2.jar
@@ -26,9 +24,9 @@ bcprov-jdk15on-1.58.jar
bonecp-0.8.0.RELEASE.jar
breeze-macros_2.11-0.13.2.jar
breeze_2.11-0.13.2.jar
-calcite-avatica-1.2.0-incubating.jar
-calcite-core-1.2.0-incubating.jar
-calcite-linq4j-1.2.0-incubating.jar
+calcite-core-1.10.0.jar
+calcite-druid-1.10.0.jar
+calcite-linq4j-1.10.0.jar
chill-java-0.8.4.jar
chill_2.11-0.8.4.jar
commons-beanutils-1.7.0.jar
@@ -46,7 +44,7 @@ commons-httpclient-3.1.jar
commons-io-2.4.jar
commons-lang-2.6.jar
commons-lang3-3.5.jar
-commons-logging-1.1.3.jar
+commons-logging-1.2.jar
commons-math3-3.4.1.jar
commons-net-3.1.jar
commons-pool-1.5.4.jar
@@ -55,9 +53,9 @@ core-1.1.2.jar
curator-client-2.6.0.jar
curator-framework-2.6.0.jar
curator-recipes-2.6.0.jar
-datanucleus-api-jdo-3.2.6.jar
-datanucleus-core-3.2.10.jar
-datanucleus-rdbms-3.2.9.jar
+datanucleus-api-jdo-4.2.4.jar
+datanucleus-core-4.1.17.jar
+datanucleus-rdbms-4.1.19.jar
derby-10.12.1.1.jar
eigenbase-properties-1.1.5.jar
flatbuffers-1.2.0-3f79e055.jar
@@ -106,6 +104,7 @@ javassist-3.18.1-GA.jar
javax.annotation-api-1.2.jar
javax.inject-1.jar
javax.inject-2.4.0-b34.jar
+javax.jdo-3.2.0-m3.jar
javax.servlet-api-3.1.0.jar
javax.ws.rs-api-2.0.1.jar
javolution-5.5.1.jar
@@ -126,6 +125,7 @@ jline-2.12.1.jar
joda-time-2.9.3.jar
jodd-core-3.5.2.jar
jpam-1.1.jar
+json-1.8.jar
json4s-ast_2.11-3.5.3.jar
json4s-core_2.11-3.5.3.jar
json4s-jackson_2.11-3.5.3.jar
@@ -157,8 +157,8 @@ objenesis-2.1.jar
okhttp-3.8.1.jar
okio-1.13.0.jar
opencsv-2.3.jar
-orc-core-1.4.3-nohive.jar
-orc-mapreduce-1.4.3-nohive.jar
+orc-core-1.4.3.jar
+orc-mapreduce-1.4.3.jar
oro-2.0.8.jar
osgi-resource-locator-1.0.1.jar
paranamer-2.8.jar
@@ -181,15 +181,14 @@ shapeless_2.11-2.3.2.jar
slf4j-api-1.7.16.jar
slf4j-log4j12-1.7.16.jar
snakeyaml-1.15.jar
-snappy-0.2.jar
snappy-java-1.1.7.1.jar
spire-macros_2.11-0.13.0.jar
spire_2.11-0.13.0.jar
stax-api-1.0-2.jar
stax-api-1.0.1.jar
stream-2.7.0.jar
-stringtemplate-3.2.1.jar
super-csv-2.2.0.jar
+transaction-api-1.1.jar
univocity-parsers-2.5.9.jar
validation-api-1.1.0.Final.jar
xbean-asm5-shaded-4.4.jar
diff --git a/dev/deps/spark-deps-hadoop-2.7 b/dev/deps/spark-deps-hadoop-2.7
index 290867035f91..74b74c275e93 100644
--- a/dev/deps/spark-deps-hadoop-2.7
+++ b/dev/deps/spark-deps-hadoop-2.7
@@ -1,14 +1,11 @@
-JavaEWAH-0.3.2.jar
RoaringBitmap-0.5.11.jar
ST4-4.0.4.jar
activation-1.1.1.jar
aircompressor-0.8.jar
-antlr-2.7.7.jar
-antlr-runtime-3.4.jar
+antlr-runtime-3.5.2.jar
antlr4-runtime-4.7.jar
aopalliance-1.0.jar
aopalliance-repackaged-2.4.0-b34.jar
-apache-log4j-extras-1.2.17.jar
apacheds-i18n-2.0.0-M15.jar
apacheds-kerberos-codec-2.0.0-M15.jar
api-asn1-api-1.0.0-M20.jar
@@ -18,6 +15,7 @@ arrow-format-0.8.0.jar
arrow-memory-0.8.0.jar
arrow-vector-0.8.0.jar
automaton-1.11-8.jar
+avatica-1.8.0.jar
avro-1.7.7.jar
avro-ipc-1.7.7.jar
avro-mapred-1.7.7-hadoop2.jar
@@ -26,9 +24,9 @@ bcprov-jdk15on-1.58.jar
bonecp-0.8.0.RELEASE.jar
breeze-macros_2.11-0.13.2.jar
breeze_2.11-0.13.2.jar
-calcite-avatica-1.2.0-incubating.jar
-calcite-core-1.2.0-incubating.jar
-calcite-linq4j-1.2.0-incubating.jar
+calcite-core-1.10.0.jar
+calcite-druid-1.10.0.jar
+calcite-linq4j-1.10.0.jar
chill-java-0.8.4.jar
chill_2.11-0.8.4.jar
commons-beanutils-1.7.0.jar
@@ -46,7 +44,7 @@ commons-httpclient-3.1.jar
commons-io-2.4.jar
commons-lang-2.6.jar
commons-lang3-3.5.jar
-commons-logging-1.1.3.jar
+commons-logging-1.2.jar
commons-math3-3.4.1.jar
commons-net-3.1.jar
commons-pool-1.5.4.jar
@@ -55,9 +53,9 @@ core-1.1.2.jar
curator-client-2.7.1.jar
curator-framework-2.7.1.jar
curator-recipes-2.7.1.jar
-datanucleus-api-jdo-3.2.6.jar
-datanucleus-core-3.2.10.jar
-datanucleus-rdbms-3.2.9.jar
+datanucleus-api-jdo-4.2.4.jar
+datanucleus-core-4.1.17.jar
+datanucleus-rdbms-4.1.19.jar
derby-10.12.1.1.jar
eigenbase-properties-1.1.5.jar
flatbuffers-1.2.0-3f79e055.jar
@@ -106,6 +104,7 @@ javassist-3.18.1-GA.jar
javax.annotation-api-1.2.jar
javax.inject-1.jar
javax.inject-2.4.0-b34.jar
+javax.jdo-3.2.0-m3.jar
javax.servlet-api-3.1.0.jar
javax.ws.rs-api-2.0.1.jar
javolution-5.5.1.jar
@@ -126,6 +125,7 @@ jline-2.12.1.jar
joda-time-2.9.3.jar
jodd-core-3.5.2.jar
jpam-1.1.jar
+json-1.8.jar
json4s-ast_2.11-3.5.3.jar
json4s-core_2.11-3.5.3.jar
json4s-jackson_2.11-3.5.3.jar
@@ -158,8 +158,8 @@ objenesis-2.1.jar
okhttp-3.8.1.jar
okio-1.13.0.jar
opencsv-2.3.jar
-orc-core-1.4.3-nohive.jar
-orc-mapreduce-1.4.3-nohive.jar
+orc-core-1.4.3.jar
+orc-mapreduce-1.4.3.jar
oro-2.0.8.jar
osgi-resource-locator-1.0.1.jar
paranamer-2.8.jar
@@ -182,15 +182,14 @@ shapeless_2.11-2.3.2.jar
slf4j-api-1.7.16.jar
slf4j-log4j12-1.7.16.jar
snakeyaml-1.15.jar
-snappy-0.2.jar
snappy-java-1.1.7.1.jar
spire-macros_2.11-0.13.0.jar
spire_2.11-0.13.0.jar
stax-api-1.0-2.jar
stax-api-1.0.1.jar
stream-2.7.0.jar
-stringtemplate-3.2.1.jar
super-csv-2.2.0.jar
+transaction-api-1.1.jar
univocity-parsers-2.5.9.jar
validation-api-1.1.0.Final.jar
xbean-asm5-shaded-4.4.jar
diff --git a/dev/test-dependencies.sh b/dev/test-dependencies.sh
index 3bf7618e1ea9..36ea96549e91 100755
--- a/dev/test-dependencies.sh
+++ b/dev/test-dependencies.sh
@@ -78,7 +78,7 @@ for HADOOP_PROFILE in "${HADOOP_PROFILES[@]}"; do
$MVN $HADOOP2_MODULE_PROFILES -P$HADOOP_PROFILE dependency:build-classpath -pl assembly \
| grep "Dependencies classpath:" -A 1 \
| tail -n 1 | tr ":" "\n" | rev | cut -d "/" -f 1 | rev | sort \
- | grep -v spark > dev/pr-deps/spark-deps-$HADOOP_PROFILE
+ | grep -v spark | grep -v hive > dev/pr-deps/spark-deps-$HADOOP_PROFILE
done
if [[ $@ == **replace-manifest** ]]; then
diff --git a/pom.xml b/pom.xml
index 0a711f287a53..ce0d3e67d14b 100644
--- a/pom.xml
+++ b/pom.xml
@@ -123,15 +123,16 @@
     <flume.version>1.6.0</flume.version>
     <zookeeper.version>3.4.6</zookeeper.version>
     <curator.version>2.6.0</curator.version>
-    <hive.group>org.spark-project.hive</hive.group>
+    <hive.group>org.apache.hive</hive.group>
-    <hive.version>1.2.1.spark2</hive.version>
+    <hive.version>2.3.2</hive.version>
-    <hive.version.short>1.2.1</hive.version.short>
+    <hive.version.short>${hive.version}</hive.version.short>
+    <hive.classifier>core</hive.classifier>
     <derby.version>10.12.1.1</derby.version>
     <parquet.version>1.8.2</parquet.version>
     <orc.version>1.4.3</orc.version>
-    <orc.classifier>nohive</orc.classifier>
+    <hive.storage.api.version>2.4.0</hive.storage.api.version>
     <hive.parquet.version>1.6.0</hive.parquet.version>
     <jetty.version>9.3.20.v20170531</jetty.version>
     <javaxservlet.version>3.1.0</javaxservlet.version>
@@ -162,14 +163,15 @@
     <fasterxml.jackson.databind.version>2.6.7.1</fasterxml.jackson.databind.version>
     <snappy.version>1.1.7.1</snappy.version>
     <netlib.java.version>1.1.2</netlib.java.version>
-    <calcite.version>1.2.0-incubating</calcite.version>
+    <calcite.version>1.10.0</calcite.version>
+    <avatica.version>1.8.0</avatica.version>
     <commons-codec.version>1.10</commons-codec.version>
     <commons-io.version>2.4</commons-io.version>
     <commons-lang2.version>2.6</commons-lang2.version>
     <commons-lang3.version>3.5</commons-lang3.version>
-    <datanucleus-core.version>3.2.10</datanucleus-core.version>
+    <datanucleus-core.version>4.1.17</datanucleus-core.version>
     <janino.version>3.0.8</janino.version>
     <jersey.version>2.22.2</jersey.version>
     <joda.version>2.9.3</joda.version>
@@ -448,6 +450,12 @@
         <groupId>commons-httpclient</groupId>
         <artifactId>commons-httpclient</artifactId>
         <version>${httpclient.classic.version}</version>
+        <exclusions>
+          <exclusion>
+            <groupId>commons-logging</groupId>
+            <artifactId>commons-logging</artifactId>
+          </exclusion>
+        </exclusions>
       </dependency>
       <dependency>
         <groupId>org.apache.httpcomponents</groupId>
@@ -536,7 +544,6 @@
         <groupId>org.xerial.snappy</groupId>
         <artifactId>snappy-java</artifactId>
         <version>${snappy.version}</version>
-        <scope>${hadoop.deps.scope}</scope>
       </dependency>
       <dependency>
         <groupId>org.lz4</groupId>
@@ -1353,6 +1360,22 @@
             <groupId>${hive.group}</groupId>
             <artifactId>hive-shims</artifactId>
           </exclusion>
+          <exclusion>
+            <groupId>org.apache.hive</groupId>
+            <artifactId>hive-storage-api</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.eclipse.jetty.aggregate</groupId>
+            <artifactId>jetty-all</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.eclipse.jetty.orbit</groupId>
+            <artifactId>javax.servlet</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>com.github.joshelser</groupId>
+            <artifactId>dropwizard-metrics-hadoop-metrics2-reporter</artifactId>
+          </exclusion>
           <exclusion>
             <groupId>org.apache.ant</groupId>
             <artifactId>ant</artifactId>
@@ -1366,12 +1389,16 @@
             <artifactId>slf4j-api</artifactId>
           </exclusion>
           <exclusion>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-log4j12</artifactId>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-web</artifactId>
           </exclusion>
           <exclusion>
-            <groupId>log4j</groupId>
-            <artifactId>log4j</artifactId>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-1.2-api</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-slf4j-impl</artifactId>
           </exclusion>
           <exclusion>
             <groupId>commons-logging</groupId>
@@ -1383,10 +1410,8 @@
         <groupId>${hive.group}</groupId>
         <artifactId>hive-exec</artifactId>
-<!--
-        <classifier>core</classifier>
--->
         <version>${hive.version}</version>
+        <classifier>core</classifier>
         <scope>${hive.deps.scope}</scope>
@@ -1395,6 +1420,14 @@
             <groupId>${hive.group}</groupId>
             <artifactId>hive-metastore</artifactId>
           </exclusion>
+          <exclusion>
+            <groupId>${hive.group}</groupId>
+            <artifactId>hive-vector-code-gen</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>${hive.group}</groupId>
+            <artifactId>hive-llap-tez</artifactId>
+          </exclusion>
           <exclusion>
             <groupId>${hive.group}</groupId>
             <artifactId>hive-shims</artifactId>
@@ -1403,11 +1436,23 @@
             <groupId>${hive.group}</groupId>
             <artifactId>hive-ant</artifactId>
           </exclusion>
+          <exclusion>
+            <groupId>org.apache.parquet</groupId>
+            <artifactId>parquet-hadoop-bundle</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.apache.orc</groupId>
+            <artifactId>orc-tools</artifactId>
+          </exclusion>
           <exclusion>
             <groupId>${hive.group}</groupId>
             <artifactId>spark-client</artifactId>
           </exclusion>
+          <exclusion>
+            <artifactId>calcite-druid</artifactId>
+            <groupId>org.apache.calcite</groupId>
+          </exclusion>
@@ -1435,6 +1480,10 @@
             <groupId>org.apache.avro</groupId>
             <artifactId>avro-mapred</artifactId>
           </exclusion>
+          <exclusion>
+            <groupId>org.apache.calcite.avatica</groupId>
+            <artifactId>avatica</artifactId>
+          </exclusion>
           <exclusion>
             <groupId>org.apache.calcite</groupId>
@@ -1465,16 +1514,24 @@
             <artifactId>zookeeper</artifactId>
           </exclusion>
           <exclusion>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-api</artifactId>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-api</artifactId>
           </exclusion>
           <exclusion>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-log4j12</artifactId>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-core</artifactId>
           </exclusion>
           <exclusion>
-            <groupId>log4j</groupId>
-            <artifactId>log4j</artifactId>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-1.2-api</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-slf4j-impl</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
           </exclusion>
           <exclusion>
             <groupId>commons-logging</groupId>
@@ -1577,6 +1634,14 @@
             <groupId>${hive.group}</groupId>
             <artifactId>hive-shims</artifactId>
           </exclusion>
+          <exclusion>
+            <groupId>org.apache.hbase</groupId>
+            <artifactId>hbase-client</artifactId>
+          </exclusion>
+          <exclusion>
+            <artifactId>HikariCP</artifactId>
+            <groupId>com.zaxxer</groupId>
+          </exclusion>
           <exclusion>
             <groupId>org.apache.thrift</groupId>
             <artifactId>libfb303</artifactId>
@@ -1593,6 +1658,18 @@
             <groupId>com.google.guava</groupId>
             <artifactId>guava</artifactId>
           </exclusion>
+          <exclusion>
+            <groupId>co.cask.tephra</groupId>
+            <artifactId>tephra-api</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>co.cask.tephra</groupId>
+            <artifactId>tephra-core</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>co.cask.tephra</groupId>
+            <artifactId>tephra-hbase-compat-1.0</artifactId>
+          </exclusion>
           <exclusion>
             <groupId>org.slf4j</groupId>
             <artifactId>slf4j-api</artifactId>
@@ -1618,6 +1695,18 @@
             <groupId>${hive.group}</groupId>
             <artifactId>hive-shims</artifactId>
           </exclusion>
+          <exclusion>
+            <groupId>org.apache.parquet</groupId>
+            <artifactId>parquet-hadoop-bundle</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>tomcat</groupId>
+            <artifactId>jasper-compiler</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>tomcat</groupId>
+            <artifactId>jasper-runtime</artifactId>
+          </exclusion>
           <exclusion>
             <groupId>commons-codec</groupId>
             <artifactId>commons-codec</artifactId>
@@ -1661,6 +1750,49 @@
+      <dependency>
+        <groupId>${hive.group}</groupId>
+        <artifactId>hive-llap-common</artifactId>
+        <version>${hive.version}</version>
+        <scope>${hive.deps.scope}</scope>
+        <exclusions>
+          <exclusion>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+          </exclusion>
+        </exclusions>
+      </dependency>
+      <dependency>
+        <groupId>${hive.group}</groupId>
+        <artifactId>hive-llap-client</artifactId>
+        <version>${hive.version}</version>
+        <scope>${hive.deps.scope}</scope>
+        <exclusions>
+          <exclusion>
+            <groupId>org.apache.zookeeper</groupId>
+            <artifactId>zookeeper</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.apache.curator</groupId>
+            <artifactId>curator-framework</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.apache.curator</groupId>
+            <artifactId>apache-curator</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+          </exclusion>
+        </exclusions>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.hive</groupId>
+        <artifactId>hive-storage-api</artifactId>
+        <version>${hive.storage.api.version}</version>
+      </dependency>
+
       <dependency>
         <groupId>net.sf.jpam</groupId>
         <artifactId>jpam</artifactId>
@@ -1677,6 +1809,55 @@
+      <dependency>
+        <groupId>org.apache.hive.shims</groupId>
+        <artifactId>hive-shims-common</artifactId>
+        <version>${hive.version}</version>
+        <scope>${hive.deps.scope}</scope>
+        <exclusions>
+          <exclusion>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-slf4j-impl</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.apache.thrift</groupId>
+            <artifactId>libthrift</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.apache.curator</groupId>
+            <artifactId>curator-framework</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.apache.zookeeper</groupId>
+            <artifactId>zookeeper</artifactId>
+          </exclusion>
+        </exclusions>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hive.shims</groupId>
+        <artifactId>hive-shims-0.23</artifactId>
+        <version>${hive.version}</version>
+        <scope>${hive.deps.scope}</scope>
+        <exclusions>
+          <exclusion>
+            <groupId>org.apache.hive.shims</groupId>
+            <artifactId>hive-shims-common</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
+          </exclusion>
+        </exclusions>
+      </dependency>
+
       <dependency>
         <groupId>${hive.group}</groupId>
         <artifactId>hive-shims</artifactId>
@@ -1729,7 +1910,6 @@
         <groupId>org.apache.orc</groupId>
         <artifactId>orc-core</artifactId>
         <version>${orc.version}</version>
-        <classifier>${orc.classifier}</classifier>
         <scope>${orc.deps.scope}</scope>
@@ -1746,7 +1926,6 @@
         <groupId>org.apache.orc</groupId>
         <artifactId>orc-mapreduce</artifactId>
         <version>${orc.version}</version>
-        <classifier>${orc.classifier}</classifier>
         <scope>${orc.deps.scope}</scope>
@@ -1848,6 +2027,10 @@
             <groupId>com.fasterxml.jackson.core</groupId>
             <artifactId>jackson-databind</artifactId>
           </exclusion>
+          <exclusion>
+            <groupId>org.apache.calcite.avatica</groupId>
+            <artifactId>avatica</artifactId>
+          </exclusion>
           <exclusion>
             <groupId>com.google.guava</groupId>
             <artifactId>guava</artifactId>
@@ -1879,9 +2062,36 @@
         <groupId>org.apache.calcite</groupId>
-        <artifactId>calcite-avatica</artifactId>
+        <artifactId>calcite-druid</artifactId>
         <version>${calcite.version}</version>
         <exclusions>
+          <exclusion>
+            <groupId>org.apache.calcite</groupId>
+            <artifactId>calcite-core</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.apache.calcite</groupId>
+            <artifactId>calcite-linq4j</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>org.apache.calcite.avatica</groupId>
+            <artifactId>avatica</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+          </exclusion>
+        </exclusions>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.calcite.avatica</groupId>
+        <artifactId>avatica</artifactId>
+        <version>${avatica.version}</version>
+        <exclusions>
+          <exclusion>
+            <groupId>org.apache.calcite.avatica</groupId>
+            <artifactId>avatica-metrics</artifactId>
+          </exclusion>
           <exclusion>
             <groupId>com.fasterxml.jackson.core</groupId>
             <artifactId>jackson-annotations</artifactId>
@@ -1894,6 +2104,10 @@
             <groupId>com.fasterxml.jackson.core</groupId>
             <artifactId>jackson-databind</artifactId>
           </exclusion>
+          <exclusion>
+            <groupId>com.google.protobuf</groupId>
+            <artifactId>protobuf-java</artifactId>
+          </exclusion>
diff --git a/resource-managers/mesos/pom.xml b/resource-managers/mesos/pom.xml
index 3995d0afeb5f..b9ba6cdd9077 100644
--- a/resource-managers/mesos/pom.xml
+++ b/resource-managers/mesos/pom.xml
@@ -77,6 +77,7 @@
       <groupId>${hive.group}</groupId>
       <artifactId>hive-exec</artifactId>
+      <classifier>${hive.classifier}</classifier>
       <scope>provided</scope>
diff --git a/resource-managers/yarn/pom.xml b/resource-managers/yarn/pom.xml
index 37e25ceecb88..e856fad0d330 100644
--- a/resource-managers/yarn/pom.xml
+++ b/resource-managers/yarn/pom.xml
@@ -172,6 +172,7 @@
       <groupId>${hive.group}</groupId>
       <artifactId>hive-exec</artifactId>
+      <classifier>${hive.classifier}</classifier>
       <scope>provided</scope>
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala
index b376108399c1..3927b80539c9 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala
@@ -645,8 +645,10 @@ abstract class ExternalCatalogSuite extends SparkFunSuite with BeforeAndAfterEac
assert(oldPart2.storage.locationUri != Some(newLocation))
// alter other storage information
catalog.alterPartitions("db2", "tbl2", Seq(
- oldPart1.copy(storage = storageFormat.copy(serde = Some(newSerde))),
- oldPart2.copy(storage = storageFormat.copy(properties = newSerdeProps))))
+ oldPart1.copy(parameters = newPart1.parameters,
+ storage = storageFormat.copy(serde = Some(newSerde))),
+ oldPart2.copy(parameters = newPart1.parameters,
+ storage = storageFormat.copy(properties = newSerdeProps))))
val newPart1b = catalog.getPartition("db2", "tbl2", part1.spec)
val newPart2b = catalog.getPartition("db2", "tbl2", part2.spec)
assert(newPart1b.storage.serde == Some(newSerde))
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala
index 6abab0073cca..696ddb38f584 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala
@@ -955,8 +955,10 @@ abstract class SessionCatalogSuite extends AnalysisTest {
val oldPart1 = catalog.getPartition(TableIdentifier("tbl2", Some("db2")), part1.spec)
val oldPart2 = catalog.getPartition(TableIdentifier("tbl2", Some("db2")), part2.spec)
catalog.alterPartitions(TableIdentifier("tbl2", Some("db2")), Seq(
- oldPart1.copy(storage = storageFormat.copy(locationUri = Some(newLocation))),
- oldPart2.copy(storage = storageFormat.copy(locationUri = Some(newLocation)))))
+ oldPart1.copy(parameters = oldPart1.parameters,
+ storage = storageFormat.copy(locationUri = Some(newLocation))),
+ oldPart2.copy(parameters = oldPart2.parameters,
+ storage = storageFormat.copy(locationUri = Some(newLocation)))))
val newPart1 = catalog.getPartition(TableIdentifier("tbl2", Some("db2")), part1.spec)
val newPart2 = catalog.getPartition(TableIdentifier("tbl2", Some("db2")), part2.spec)
assert(newPart1.storage.locationUri == Some(newLocation))
@@ -965,7 +967,9 @@ abstract class SessionCatalogSuite extends AnalysisTest {
assert(oldPart2.storage.locationUri != Some(newLocation))
// Alter partitions without explicitly specifying database
catalog.setCurrentDatabase("db2")
- catalog.alterPartitions(TableIdentifier("tbl2"), Seq(oldPart1, oldPart2))
+ catalog.alterPartitions(TableIdentifier("tbl2"),
+ Seq(oldPart1.copy(parameters = newPart1.parameters),
+ oldPart2.copy(parameters = newPart2.parameters)))
val newerPart1 = catalog.getPartition(TableIdentifier("tbl2"), part1.spec)
val newerPart2 = catalog.getPartition(TableIdentifier("tbl2"), part2.spec)
assert(oldPart1.storage.locationUri == newerPart1.storage.locationUri)
diff --git a/sql/core/pom.xml b/sql/core/pom.xml
index ef41837f89d6..26c86af9a6a5 100644
--- a/sql/core/pom.xml
+++ b/sql/core/pom.xml
@@ -86,15 +86,17 @@
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-storage-api</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.apache.orc</groupId>
       <artifactId>orc-core</artifactId>
-      <classifier>${orc.classifier}</classifier>
     </dependency>
     <dependency>
       <groupId>org.apache.orc</groupId>
       <artifactId>orc-mapreduce</artifactId>
-      <classifier>${orc.classifier}</classifier>
     </dependency>
     <dependency>
       <groupId>org.apache.parquet</groupId>
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/orc/OrcColumnVector.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/orc/OrcColumnVector.java
index 12f4d658b186..e9101714c716 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/orc/OrcColumnVector.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/orc/OrcColumnVector.java
@@ -19,7 +19,7 @@
import java.math.BigDecimal;
-import org.apache.orc.storage.ql.exec.vector.*;
+import org.apache.hadoop.hive.ql.exec.vector.*;
import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.Decimal;
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/orc/OrcColumnarBatchReader.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/orc/OrcColumnarBatchReader.java
index dcebdc39f0aa..873d2e54e862 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/orc/OrcColumnarBatchReader.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/orc/OrcColumnarBatchReader.java
@@ -21,6 +21,9 @@
import java.util.stream.IntStream;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.ql.exec.vector.*;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
@@ -30,9 +33,6 @@
import org.apache.orc.Reader;
import org.apache.orc.TypeDescription;
import org.apache.orc.mapred.OrcInputFormat;
-import org.apache.orc.storage.common.type.HiveDecimal;
-import org.apache.orc.storage.ql.exec.vector.*;
-import org.apache.orc.storage.serde2.io.HiveDecimalWritable;
import org.apache.spark.memory.MemoryMode;
import org.apache.spark.sql.catalyst.InternalRow;
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcDeserializer.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcDeserializer.scala
index 4ecc54bd2fd9..c23c17185f94 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcDeserializer.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcDeserializer.scala
@@ -17,9 +17,9 @@
package org.apache.spark.sql.execution.datasources.orc
+import org.apache.hadoop.hive.serde2.io.{DateWritable, HiveDecimalWritable}
import org.apache.hadoop.io._
import org.apache.orc.mapred.{OrcList, OrcMap, OrcStruct, OrcTimestamp}
-import org.apache.orc.storage.serde2.io.{DateWritable, HiveDecimalWritable}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{SpecificInternalRow, UnsafeArrayData}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilters.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilters.scala
index 4f44ae4fa1d7..6a57d9598a6d 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilters.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilters.scala
@@ -17,9 +17,9 @@
package org.apache.spark.sql.execution.datasources.orc
-import org.apache.orc.storage.ql.io.sarg.{PredicateLeaf, SearchArgument, SearchArgumentFactory}
-import org.apache.orc.storage.ql.io.sarg.SearchArgument.Builder
-import org.apache.orc.storage.serde2.io.HiveDecimalWritable
+import org.apache.hadoop.hive.ql.io.sarg.{PredicateLeaf, SearchArgument, SearchArgumentFactory}
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgument.Builder
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable
import org.apache.spark.sql.sources.Filter
import org.apache.spark.sql.types._
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcSerializer.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcSerializer.scala
index 899af0750cad..674fa4c80dd5 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcSerializer.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcSerializer.scala
@@ -17,11 +17,11 @@
package org.apache.spark.sql.execution.datasources.orc
+import org.apache.hadoop.hive.common.`type`.HiveDecimal
+import org.apache.hadoop.hive.serde2.io.{DateWritable, HiveDecimalWritable}
import org.apache.hadoop.io._
import org.apache.orc.TypeDescription
import org.apache.orc.mapred.{OrcList, OrcMap, OrcStruct, OrcTimestamp}
-import org.apache.orc.storage.common.`type`.HiveDecimal
-import org.apache.orc.storage.serde2.io.{DateWritable, HiveDecimalWritable}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.SpecializedGetters
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewSuite.scala
index 8269d4d3a285..3cfc911a92a8 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewSuite.scala
@@ -371,7 +371,7 @@ abstract class SQLViewSuite extends QueryTest with SQLTestUtils {
test("correctly handle ALTER VIEW") {
withTable("jt2") {
withView("testView") {
- sql("CREATE VIEW testView AS SELECT id FROM jt")
+ sql("CREATE VIEW testView AS SELECT 1 as c1 FROM jt")
val df = (1 until 10).map(i => i -> i).toDF("i", "j")
df.write.format("json").saveAsTable("jt2")
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
index 404117626242..58e3ffc18da9 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
@@ -1266,7 +1266,7 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils {
// if (isUsingHiveMetastore) {
// assert(storageFormat.properties.get("path") === expected)
// }
- assert(storageFormat.locationUri === Some(expected))
+ assert(Some(storageFormat.locationUri.get.getPath) === Some(expected.getPath))
}
// set table location
sql("ALTER TABLE dbx.tab1 SET LOCATION '/path/to/your/lovely/heart'")
@@ -2514,9 +2514,9 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils {
}.getMessage
assert(e.contains("Found duplicate column(s)"))
} else {
- sql("ALTER TABLE t1 ADD COLUMNS (C1 string)")
+ sql("ALTER TABLE t1 ADD COLUMNS (C2 string)")
assert(spark.table("t1").schema ==
- new StructType().add("c1", IntegerType).add("C1", StringType))
+ new StructType().add("c1", IntegerType).add("C2", StringType))
}
}
}
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilterSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilterSuite.scala
index 8680b86517b1..4ebd55d6e0d7 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilterSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilterSuite.scala
@@ -22,7 +22,7 @@ import java.sql.{Date, Timestamp}
import scala.collection.JavaConverters._
-import org.apache.orc.storage.ql.io.sarg.{PredicateLeaf, SearchArgument}
+import org.apache.hadoop.hive.ql.io.sarg.{PredicateLeaf, SearchArgument}
import org.apache.spark.sql.{Column, DataFrame}
import org.apache.spark.sql.catalyst.dsl.expressions._
@@ -36,7 +36,7 @@ import org.apache.spark.sql.types._
* A test suite that tests Apache ORC filter API based filter pushdown optimization.
* OrcFilterSuite and HiveOrcFilterSuite is logically duplicated to provide the same test coverage.
* The difference are the packages containing 'Predicate' and 'SearchArgument' classes.
- * - OrcFilterSuite uses 'org.apache.orc.storage.ql.io.sarg' package.
+ * - OrcFilterSuite uses 'org.apache.hadoop.hive.ql.io.sarg' package.
* - HiveOrcFilterSuite uses 'org.apache.hadoop.hive.ql.io.sarg' package.
*/
class OrcFilterSuite extends OrcTest with SharedSQLContext {
diff --git a/sql/hive-thriftserver/if/TCLIService.thrift b/sql/hive-thriftserver/if/TCLIService.thrift
index 7cd6fa37cec3..824b04919073 100644
--- a/sql/hive-thriftserver/if/TCLIService.thrift
+++ b/sql/hive-thriftserver/if/TCLIService.thrift
@@ -32,14 +32,14 @@
// * Service names begin with the letter "T", use a capital letter for each
// new word (with no underscores), and end with the word "Service".
-namespace java org.apache.hive.service.cli.thrift
-namespace cpp apache.hive.service.cli.thrift
+namespace java org.apache.hive.service.rpc.thrift
+namespace cpp apache.hive.service.rpc.thrift
// List of protocol versions. A new token should be
// added to the end of this list every time a change is made.
enum TProtocolVersion {
HIVE_CLI_SERVICE_PROTOCOL_V1,
-
+
// V2 adds support for asynchronous execution
HIVE_CLI_SERVICE_PROTOCOL_V2
@@ -60,6 +60,12 @@ enum TProtocolVersion {
// V8 adds support for interval types
HIVE_CLI_SERVICE_PROTOCOL_V8
+
+ // V9 adds support for serializing ResultSets in SerDe
+ HIVE_CLI_SERVICE_PROTOCOL_V9
+
+ // V10 adds support for in place updates via GetOperationStatus
+ HIVE_CLI_SERVICE_PROTOCOL_V10
}
enum TTypeId {
@@ -86,7 +92,7 @@ enum TTypeId {
INTERVAL_YEAR_MONTH_TYPE,
INTERVAL_DAY_TIME_TYPE
}
-
+
const set<TTypeId> PRIMITIVE_TYPES = [
TTypeId.BOOLEAN_TYPE,
TTypeId.TINYINT_TYPE,
@@ -265,7 +271,7 @@ struct TColumnDesc {
// The type descriptor for this column
2: required TTypeDesc typeDesc
-
+
// The ordinal position of this column in the schema
3: required i32 position
@@ -402,6 +408,8 @@ struct TRowSet {
1: required i64 startRowOffset
  2: required list<TRow> rows
  3: optional list<TColumn> columns
+ 4: optional binary binaryColumns
+ 5: optional i32 columnCount
}
// The return status code contained in each response.
@@ -456,6 +464,9 @@ enum TOperationState {
// The operation is in an pending state
PENDING_STATE,
+
+ // The operation is in an timedout state
+ TIMEDOUT_STATE,
}
// A string identifier. This is interpreted literally.
@@ -551,7 +562,7 @@ struct TOperationHandle {
// which operations may be executed.
struct TOpenSessionReq {
// The version of the HiveServer2 protocol that the client is using.
- 1: required TProtocolVersion client_protocol = TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V8
+ 1: required TProtocolVersion client_protocol = TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V10
// Username and password for authentication.
// Depending on the authentication scheme being used,
@@ -570,7 +581,7 @@ struct TOpenSessionResp {
1: required TStatus status
// The protocol version that the server is using.
- 2: required TProtocolVersion serverProtocolVersion = TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V8
+ 2: required TProtocolVersion serverProtocolVersion = TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V10
// Session Handle
3: optional TSessionHandle sessionHandle
@@ -661,7 +672,7 @@ union TGetInfoValue {
// The function returns general information about the data source
// using the same keys as ODBC.
struct TGetInfoReq {
- // The session to run this request against
+ // The sesssion to run this request against
1: required TSessionHandle sessionHandle
2: required TGetInfoType infoType
@@ -692,9 +703,12 @@ struct TExecuteStatementReq {
// is executed. These properties apply to this statement
// only and will not affect the subsequent state of the Session.
  3: optional map<string, string> confOverlay
-
+
// Execute asynchronously when runAsync is true
4: optional bool runAsync = false
+
+ // The number of seconds after which the query will timeout on the server
+ 5: optional i64 queryTimeout = 0
}
struct TExecuteStatementResp {
@@ -718,13 +732,13 @@ struct TGetTypeInfoReq {
struct TGetTypeInfoResp {
1: required TStatus status
2: optional TOperationHandle operationHandle
-}
+}
// GetCatalogs()
//
-// Returns the list of catalogs (databases)
-// Results are ordered by TABLE_CATALOG
+// Returns the list of catalogs (databases)
+// Results are ordered by TABLE_CATALOG
//
// Resultset columns :
// col1
@@ -834,9 +848,9 @@ struct TGetTablesResp {
// GetTableTypes()
//
-// Returns the table types available in this database.
-// The results are ordered by table type.
-//
+// Returns the table types available in this database.
+// The results are ordered by table type.
+//
// col1
// name: TABLE_TYPE
// type: STRING
@@ -857,8 +871,8 @@ struct TGetTableTypesResp {
// Returns a list of columns in the specified tables.
// The information is returned as a result set which can be fetched
// using the OperationHandle provided in the response.
-// Results are ordered by TABLE_CAT, TABLE_SCHEM, TABLE_NAME,
-// and ORDINAL_POSITION.
+// Results are ordered by TABLE_CAT, TABLE_SCHEM, TABLE_NAME,
+// and ORDINAL_POSITION.
//
// Result Set Columns are the same as those for the ODBC CLIColumns
// function.
@@ -954,7 +968,53 @@ struct TGetFunctionsResp {
1: required TStatus status
2: optional TOperationHandle operationHandle
}
-
+
+struct TGetPrimaryKeysReq {
+ // Session to run this request against
+ 1: required TSessionHandle sessionHandle
+
+ // Name of the catalog.
+ 2: optional TIdentifier catalogName
+
+ // Name of the schema.
+ 3: optional TIdentifier schemaName
+
+ // Name of the table.
+ 4: optional TIdentifier tableName
+}
+
+struct TGetPrimaryKeysResp {
+ 1: required TStatus status
+ 2: optional TOperationHandle operationHandle
+}
+
+struct TGetCrossReferenceReq {
+ // Session to run this request against
+ 1: required TSessionHandle sessionHandle
+
+ // Name of the parent catalog.
+ 2: optional TIdentifier parentCatalogName
+
+ // Name of the parent schema.
+ 3: optional TIdentifier parentSchemaName
+
+ // Name of the parent table.
+ 4: optional TIdentifier parentTableName
+
+ // Name of the foreign catalog.
+ 5: optional TIdentifier foreignCatalogName
+
+ // Name of the foreign schema.
+ 6: optional TIdentifier foreignSchemaName
+
+ // Name of the foreign table.
+ 7: optional TIdentifier foreignTableName
+}
+
+struct TGetCrossReferenceResp {
+ 1: required TStatus status
+ 2: optional TOperationHandle operationHandle
+}
// GetOperationStatus()
//
@@ -962,6 +1022,8 @@ struct TGetFunctionsResp {
struct TGetOperationStatusReq {
// Session to run this request against
1: required TOperationHandle operationHandle
+ // optional arguments to get progress information
+ 2: optional bool getProgressUpdate
}
struct TGetOperationStatusResp {
@@ -977,6 +1039,21 @@ struct TGetOperationStatusResp {
// Error message
5: optional string errorMessage
+
+ // List of statuses of sub tasks
+ 6: optional string taskStatus
+
+ // When was the operation started
+ 7: optional i64 operationStarted
+
+ // When was the operation completed
+ 8: optional i64 operationCompleted
+
+ // If the operation has the result
+ 9: optional bool hasResultSet
+
+ 10: optional TProgressUpdateResp progressUpdateResponse
+
}
@@ -1032,7 +1109,7 @@ enum TFetchOrientation {
FETCH_PRIOR,
// Return the rowset at the given fetch offset relative
- // to the current rowset.
+ // to the curren rowset.
// NOT SUPPORTED
FETCH_RELATIVE,
@@ -1059,7 +1136,7 @@ struct TFetchResultsReq {
// The fetch orientation. For V1 this must be either
// FETCH_NEXT or FETCH_FIRST. Defaults to FETCH_NEXT.
2: required TFetchOrientation orientation = TFetchOrientation.FETCH_NEXT
-
+
// Max number of rows that should be returned in
// the rowset.
3: required i64 maxRows
@@ -1132,6 +1209,21 @@ struct TRenewDelegationTokenResp {
1: required TStatus status
}
+enum TJobExecutionStatus {
+ IN_PROGRESS,
+ COMPLETE,
+ NOT_AVAILABLE
+}
+
+struct TProgressUpdateResp {
+  1: required list<string> headerNames
+  2: required list<list<string>> rows
+ 3: required double progressedPercentage
+ 4: required TJobExecutionStatus status
+ 5: required string footerSummary
+ 6: required i64 startTime
+}
+
service TCLIService {
TOpenSessionResp OpenSession(1:TOpenSessionReq req);
@@ -1156,8 +1248,12 @@ service TCLIService {
TGetFunctionsResp GetFunctions(1:TGetFunctionsReq req);
+ TGetPrimaryKeysResp GetPrimaryKeys(1:TGetPrimaryKeysReq req);
+
+ TGetCrossReferenceResp GetCrossReference(1:TGetCrossReferenceReq req);
+
TGetOperationStatusResp GetOperationStatus(1:TGetOperationStatusReq req);
-
+
TCancelOperationResp CancelOperation(1:TCancelOperationReq req);
TCloseOperationResp CloseOperation(1:TCloseOperationReq req);
diff --git a/sql/hive-thriftserver/pom.xml b/sql/hive-thriftserver/pom.xml
index 9f247f9224c7..4f5a79821cc5 100644
--- a/sql/hive-thriftserver/pom.xml
+++ b/sql/hive-thriftserver/pom.xml
@@ -55,6 +55,11 @@
      <groupId>${hive.group}</groupId>
      <artifactId>hive-cli</artifactId>
    </dependency>
+    <dependency>
+      <groupId>${hive.group}</groupId>
+      <artifactId>hive-exec</artifactId>
+      <classifier>${hive.classifier}</classifier>
+    </dependency>
    <dependency>
      <groupId>${hive.group}</groupId>
      <artifactId>hive-jdbc</artifactId>
@@ -63,6 +68,10 @@
      <groupId>${hive.group}</groupId>
      <artifactId>hive-beeline</artifactId>
    </dependency>
+    <dependency>
+      <groupId>org.apache.hive.shims</groupId>
+      <artifactId>hive-shims-common</artifactId>
+    </dependency>
    <dependency>
      <groupId>org.eclipse.jetty</groupId>
      <artifactId>jetty-server</artifactId>
diff --git a/sql/hive-thriftserver/src/gen/java/org/apache/hive/service/cli/thrift/TCLIServiceConstants.java b/sql/hive-thriftserver/src/gen/java/org/apache/hive/service/cli/thrift/TCLIServiceConstants.java
deleted file mode 100644
index 25a38b178428..000000000000
--- a/sql/hive-thriftserver/src/gen/java/org/apache/hive/service/cli/thrift/TCLIServiceConstants.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Autogenerated by Thrift Compiler (0.9.0)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- * @generated
- */
-package org.apache.hive.service.cli.thrift;
-
-import org.apache.commons.lang.builder.HashCodeBuilder;
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class TCLIServiceConstants {
-
- public static final Set<TTypeId> PRIMITIVE_TYPES = new HashSet<TTypeId>();
- static {
- PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.BOOLEAN_TYPE);
- PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.TINYINT_TYPE);
- PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.SMALLINT_TYPE);
- PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.INT_TYPE);
- PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.BIGINT_TYPE);
- PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.FLOAT_TYPE);
- PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.DOUBLE_TYPE);
- PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.STRING_TYPE);
- PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.TIMESTAMP_TYPE);
- PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.BINARY_TYPE);
- PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.DECIMAL_TYPE);
- PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.NULL_TYPE);
- PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.DATE_TYPE);
- PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.VARCHAR_TYPE);
- PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.CHAR_TYPE);
- PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.INTERVAL_YEAR_MONTH_TYPE);
- PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.INTERVAL_DAY_TIME_TYPE);
- }
-
- public static final Set<TTypeId> COMPLEX_TYPES = new HashSet<TTypeId>();
- static {
- COMPLEX_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.ARRAY_TYPE);
- COMPLEX_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.MAP_TYPE);
- COMPLEX_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.STRUCT_TYPE);
- COMPLEX_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.UNION_TYPE);
- COMPLEX_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.USER_DEFINED_TYPE);
- }
-
- public static final Set<TTypeId> COLLECTION_TYPES = new HashSet<TTypeId>();
- static {
- COLLECTION_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.ARRAY_TYPE);
- COLLECTION_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.MAP_TYPE);
- }
-
- public static final Map<TTypeId,String> TYPE_NAMES = new HashMap<TTypeId,String>();
- static {
- TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.BOOLEAN_TYPE, "BOOLEAN");
- TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.TINYINT_TYPE, "TINYINT");
- TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.SMALLINT_TYPE, "SMALLINT");
- TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.INT_TYPE, "INT");
- TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.BIGINT_TYPE, "BIGINT");
- TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.FLOAT_TYPE, "FLOAT");
- TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.DOUBLE_TYPE, "DOUBLE");
- TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.STRING_TYPE, "STRING");
- TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.TIMESTAMP_TYPE, "TIMESTAMP");
- TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.BINARY_TYPE, "BINARY");
- TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.ARRAY_TYPE, "ARRAY");
- TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.MAP_TYPE, "MAP");
- TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.STRUCT_TYPE, "STRUCT");
- TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.UNION_TYPE, "UNIONTYPE");
- TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.DECIMAL_TYPE, "DECIMAL");
- TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.NULL_TYPE, "NULL");
- TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.DATE_TYPE, "DATE");
- TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.VARCHAR_TYPE, "VARCHAR");
- TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.CHAR_TYPE, "CHAR");
- TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.INTERVAL_YEAR_MONTH_TYPE, "INTERVAL_YEAR_MONTH");
- TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.INTERVAL_DAY_TIME_TYPE, "INTERVAL_DAY_TIME");
- }
-
- public static final String CHARACTER_MAXIMUM_LENGTH = "characterMaximumLength";
-
- public static final String PRECISION = "precision";
-
- public static final String SCALE = "scale";
-
-}
diff --git a/sql/hive-thriftserver/src/gen/java/org/apache/hive/service/cli/thrift/TArrayTypeEntry.java b/sql/hive-thriftserver/src/gen/java/org/apache/hive/service/rpc/thrift/TArrayTypeEntry.java
similarity index 87%
rename from sql/hive-thriftserver/src/gen/java/org/apache/hive/service/cli/thrift/TArrayTypeEntry.java
rename to sql/hive-thriftserver/src/gen/java/org/apache/hive/service/rpc/thrift/TArrayTypeEntry.java
index 6323d34eac73..a26ac82b85fe 100644
--- a/sql/hive-thriftserver/src/gen/java/org/apache/hive/service/cli/thrift/TArrayTypeEntry.java
+++ b/sql/hive-thriftserver/src/gen/java/org/apache/hive/service/rpc/thrift/TArrayTypeEntry.java
@@ -1,37 +1,26 @@
/**
- * Autogenerated by Thrift Compiler (0.9.0)
+ * Autogenerated by Thrift Compiler (0.9.3)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
-package org.apache.hive.service.cli.thrift;
+package org.apache.hive.service.rpc.thrift;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.scheme.IScheme;
import org.apache.thrift.scheme.SchemeFactory;
import org.apache.thrift.scheme.StandardScheme;
-
import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class TArrayTypeEntry implements org.apache.thrift.TBase<TArrayTypeEntry, TArrayTypeEntry._Fields>, java.io.Serializable, Cloneable {
+
+import javax.annotation.Generated;
+import java.util.*;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class TArrayTypeEntry implements org.apache.thrift.TBase<TArrayTypeEntry, TArrayTypeEntry._Fields>, java.io.Serializable, Cloneable, Comparable<TArrayTypeEntry> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TArrayTypeEntry");
private static final org.apache.thrift.protocol.TField OBJECT_TYPE_PTR_FIELD_DESC = new org.apache.thrift.protocol.TField("objectTypePtr", org.apache.thrift.protocol.TType.I32, (short)1);
@@ -181,7 +170,7 @@ public void setFieldValue(_Fields field, Object value) {
public Object getFieldValue(_Fields field) {
switch (field) {
case OBJECT_TYPE_PTR:
- return Integer.valueOf(getObjectTypePtr());
+ return getObjectTypePtr();
}
throw new IllegalStateException();
@@ -227,30 +216,30 @@ public boolean equals(TArrayTypeEntry that) {
@Override
public int hashCode() {
- HashCodeBuilder builder = new HashCodeBuilder();
+ List