From 66a8c6be4768d8d35a0828e1c2cbe93fcf60c2aa Mon Sep 17 00:00:00 2001 From: "chonghu.lh" Date: Wed, 7 Feb 2024 19:57:21 +0800 Subject: [PATCH 1/4] Add hadoop-compat-bench --- hadoop-compat-bench/HdfsCompatBenchIssue.md | 58 ++ hadoop-compat-bench/pom.xml | 118 +++ hadoop-compat-bench/shell/cases/attr.t | 42 + hadoop-compat-bench/shell/cases/concat.t | 20 + hadoop-compat-bench/shell/cases/copy.t | 17 + hadoop-compat-bench/shell/cases/fileinfo.t | 35 + .../shell/cases/modification.t | 18 + hadoop-compat-bench/shell/cases/move.t | 17 + hadoop-compat-bench/shell/cases/read.t | 23 + hadoop-compat-bench/shell/cases/remove.t | 24 + hadoop-compat-bench/shell/cases/snapshot.t | 13 + .../shell/cases/storagePolicy.t | 22 + hadoop-compat-bench/shell/misc.sh | 154 ++++ .../hadoop/compat/AbstractHdfsCompatCase.java | 82 ++ .../hadoop/compat/HdfsCompatApiScope.java | 352 +++++++++ .../apache/hadoop/compat/HdfsCompatCase.java | 31 + .../hadoop/compat/HdfsCompatCaseCleanup.java | 28 + .../hadoop/compat/HdfsCompatCaseGroup.java | 29 + .../hadoop/compat/HdfsCompatCasePrepare.java | 28 + .../hadoop/compat/HdfsCompatCaseSetUp.java | 28 + .../hadoop/compat/HdfsCompatCaseTearDown.java | 28 + .../hadoop/compat/HdfsCompatCommand.java | 127 +++ .../hadoop/compat/HdfsCompatEnvironment.java | 155 ++++ .../HdfsCompatIllegalArgumentException.java | 8 + .../HdfsCompatIllegalCaseException.java | 31 + .../hadoop/compat/HdfsCompatReport.java | 79 ++ .../hadoop/compat/HdfsCompatShellScope.java | 403 ++++++++++ .../apache/hadoop/compat/HdfsCompatSuite.java | 10 + .../apache/hadoop/compat/HdfsCompatUtil.java | 117 +++ .../hadoop/compat/HdfsCompatibility.java | 241 ++++++ .../compat/cases/function/HdfsCompatAcl.java | 120 +++ .../cases/function/HdfsCompatCreate.java | 152 ++++ .../cases/function/HdfsCompatDirectory.java | 145 ++++ .../compat/cases/function/HdfsCompatFile.java | 240 ++++++ .../cases/function/HdfsCompatLocal.java | 111 +++ .../cases/function/HdfsCompatServer.java | 227 ++++++ .../cases/function/HdfsCompatSnapshot.java | 138 ++++ .../function/HdfsCompatStoragePolicy.java | 106 +++ .../cases/function/HdfsCompatSymlink.java | 70 ++ .../cases/function/HdfsCompatTpcds.java | 120 +++ .../cases/function/HdfsCompatXAttr.java | 94 +++ .../implement/HdfsCompatFileSystemImpl.java | 735 ++++++++++++++++++ .../compat/suites/HdfsCompatSuiteForAll.java | 68 ++ .../suites/HdfsCompatSuiteForShell.java | 52 ++ .../suites/HdfsCompatSuiteForTpcds.java | 44 ++ .../hadoop-compat-bench-log4j.properties | 24 + .../compat/TestHdfsCompatDefaultSuites.java | 60 ++ .../compat/TestHdfsCompatFsCommand.java | 179 +++++ .../TestHdfsCompatInterfaceCoverage.java | 57 ++ .../compat/TestHdfsCompatShellCommand.java | 127 +++ .../compat/cases/HdfsCompatAclTestCases.java | 68 ++ .../cases/HdfsCompatMkdirTestCases.java | 31 + .../compat/hdfs/HdfsCompatMiniCluster.java | 114 +++ .../compat/hdfs/HdfsCompatTestCommand.java | 35 + .../compat/hdfs/HdfsCompatTestShellScope.java | 96 +++ .../hadoop-compat-bench-test-shell-hadoop.sh | 29 + .../hadoop-compat-bench-test-shell-hdfs.sh | 33 + .../src/test/resources/test-case-simple.t | 10 + .../src/test/resources/test-case-skip.t | 8 + 59 files changed, 5631 insertions(+) create mode 100644 hadoop-compat-bench/HdfsCompatBenchIssue.md create mode 100644 hadoop-compat-bench/pom.xml create mode 100644 hadoop-compat-bench/shell/cases/attr.t create mode 100644 hadoop-compat-bench/shell/cases/concat.t create mode 100644 hadoop-compat-bench/shell/cases/copy.t create mode 100644 
hadoop-compat-bench/shell/cases/fileinfo.t create mode 100644 hadoop-compat-bench/shell/cases/modification.t create mode 100644 hadoop-compat-bench/shell/cases/move.t create mode 100644 hadoop-compat-bench/shell/cases/read.t create mode 100644 hadoop-compat-bench/shell/cases/remove.t create mode 100644 hadoop-compat-bench/shell/cases/snapshot.t create mode 100644 hadoop-compat-bench/shell/cases/storagePolicy.t create mode 100644 hadoop-compat-bench/shell/misc.sh create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/AbstractHdfsCompatCase.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatApiScope.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCase.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseCleanup.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseGroup.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCasePrepare.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseSetUp.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseTearDown.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCommand.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatEnvironment.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatIllegalArgumentException.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatIllegalCaseException.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatReport.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatShellScope.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatSuite.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatUtil.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatibility.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatAcl.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatCreate.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatDirectory.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatFile.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatLocal.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatServer.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatSnapshot.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatStoragePolicy.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatSymlink.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatTpcds.java create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatXAttr.java create mode 100644 
hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/implement/HdfsCompatFileSystemImpl.java
create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForAll.java
create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForShell.java
create mode 100644 hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForTpcds.java
create mode 100644 hadoop-compat-bench/src/main/resources/hadoop-compat-bench-log4j.properties
create mode 100644 hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatDefaultSuites.java
create mode 100644 hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatFsCommand.java
create mode 100644 hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatInterfaceCoverage.java
create mode 100644 hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatShellCommand.java
create mode 100644 hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/cases/HdfsCompatAclTestCases.java
create mode 100644 hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/cases/HdfsCompatMkdirTestCases.java
create mode 100644 hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatMiniCluster.java
create mode 100644 hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestCommand.java
create mode 100644 hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestShellScope.java
create mode 100644 hadoop-compat-bench/src/test/resources/hadoop-compat-bench-test-shell-hadoop.sh
create mode 100644 hadoop-compat-bench/src/test/resources/hadoop-compat-bench-test-shell-hdfs.sh
create mode 100644 hadoop-compat-bench/src/test/resources/test-case-simple.t
create mode 100644 hadoop-compat-bench/src/test/resources/test-case-skip.t
diff --git a/hadoop-compat-bench/HdfsCompatBenchIssue.md b/hadoop-compat-bench/HdfsCompatBenchIssue.md
new file mode 100644
index 0000000000000..77c3e2b246f41
--- /dev/null
+++ b/hadoop-compat-bench/HdfsCompatBenchIssue.md
@@ -0,0 +1,58 @@
+
+
+# Compatibility Benchmark over HCFS Implementations
+
+## Background
+
+Hadoop-Compatible File System (HCFS) is a core concept in the big data storage ecosystem,
+providing unified interfaces and generally clear semantics,
+and has become the de facto standard for industry storage systems to follow and conform to.
+There have been a series of HCFS implementations in Hadoop,
+such as S3AFileSystem for Amazon's S3 Object Store,
+WASB for Microsoft's Azure Blob Storage and the OSS connector for Alibaba Cloud Object Storage,
+and more from storage service providers on their own.
+
+## Problems
+
+However, as indicated by [`HCFS Introduction`](hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md),
+there is no formal suite for assessing the compatibility of all such HCFS implementations.
+Thus, whether the functionality is well implemented and meets the core compatibility expectations
+mainly relies on the service provider's own report.
+Meanwhile, Hadoop keeps evolving, and new features are continuously added to HCFS interfaces
+for existing implementations to follow and adopt, in which case
+Hadoop also needs a tool to quickly assess whether these features are supported by a specific HCFS implementation.
+Besides, the well-known hadoop command line tool (the hdfs shell) is used to interact directly with an HCFS storage system,
+where most commands correspond to specific HCFS interfaces and work well.
+Still, there are cases that are complicated and may not work, like the expunge command.
+To check such commands for an HCFS, we also need an approach to figure them out.
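+One building block for such an assessment is reflective probing: before exercising a feature
+end to end, a tool can at least check whether the target FileSystem class declares the
+corresponding method. This mirrors the `ifDef` handling in `HdfsCompatApiScope` later in this
+module; a minimal sketch (the probe class itself is illustrative and not part of the benchmark):
+
+```java
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.util.HashSet;
+import java.util.Set;
+
+public final class InterfaceProbe {
+  /** Names of public, non-static methods declared by a class. */
+  static Set<String> publicMethodNames(Class<?> cls) {
+    Set<String> names = new HashSet<>();
+    for (Method m : cls.getDeclaredMethods()) {
+      int mod = m.getModifiers();
+      if (Modifier.isPublic(mod) && !Modifier.isStatic(mod)) {
+        names.add(m.getName());
+      }
+    }
+    return names;
+  }
+
+  public static void main(String[] args) throws Exception {
+    // Does this HCFS implementation declare createSnapshot at all?
+    Class<?> cls = Class.forName("org.apache.hadoop.hdfs.DistributedFileSystem");
+    System.out.println(publicMethodNames(cls).contains("createSnapshot"));
+  }
+}
+```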
+
+## Proposal
+
+Accordingly, we propose to define a formal HCFS compatibility benchmark and provide a corresponding tool
+to do the compatibility assessment for an HCFS storage system.
+The benchmark and tool should consider both HCFS interfaces and hdfs shell commands.
+Different scenarios require different kinds of compatibility,
+so we define different suites in the benchmark.
+
+## Benefits
+
+We intend the benchmark and tool to be useful for both storage providers and storage users.
+For end users, they can be used to evaluate the compatibility level and
+determine whether the storage system in question is suitable for the required scenarios.
+For storage providers, they help to quickly generate an objective and reliable report
+about the core functions of the storage service.
+For instance, if an HCFS implementation scores 100% on a suite named 'tpcds',
+it is demonstrated that all functions needed by a TPC-DS program work as expected.
+The report is also a guide indicating how storage service abilities map to HCFS interfaces, such as storage class on S3.
diff --git a/hadoop-compat-bench/pom.xml b/hadoop-compat-bench/pom.xml
new file mode 100644
index 0000000000000..c904f3dfc4fa3
--- /dev/null
+++ b/hadoop-compat-bench/pom.xml
@@ -0,0 +1,118 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project</artifactId>
+    <version>3.5.0-SNAPSHOT</version>
+    <relativePath>../hadoop-project</relativePath>
+  </parent>
+  <artifactId>hadoop-compat-bench</artifactId>
+  <version>3.5.0-SNAPSHOT</version>
+  <packaging>jar</packaging>
+
+  <name>Apache Hadoop Compatibility</name>
+  <description>Apache Hadoop Compatibility Benchmark</description>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>compile</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs-client</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-core</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+        <configuration>
+          <archive>
+            <manifest>
+              <mainClass>org.apache.hadoop.compat.HdfsCompatibility</mainClass>
+            </manifest>
+          </archive>
+        </configuration>
+        <executions>
+          <execution>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+            <configuration>
+              <archive>
+                <manifest>
+                  <mainClass>org.apache.hadoop.compat.hdfs.HdfsCompatMiniCluster</mainClass>
+                </manifest>
+              </archive>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <forkedProcessTimeoutInSeconds>3600</forkedProcessTimeoutInSeconds>
+        </configuration>
+      </plugin>
+    </plugins>
+    <resources>
+      <resource>
+        <directory>src/main/resources</directory>
+      </resource>
+      <resource>
+        <directory>shell</directory>
+      </resource>
+    </resources>
+  </build>
+</project>
diff --git a/hadoop-compat-bench/shell/cases/attr.t b/hadoop-compat-bench/shell/cases/attr.t
new file mode 100644
index 0000000000000..00f87cffdb92f
--- /dev/null
+++ b/hadoop-compat-bench/shell/cases/attr.t
@@ -0,0 +1,42 @@
+#!/bin/sh
+. $(dirname "$0")/../misc.sh
+
+echo "Hello World!" > "${localDir}/dat"
+hadoop fs -put "${localDir}/dat" "${baseDir}/"
+
+echo "1..10"
+
+# 1. chown
+hadoop fs -chown "hadoop-compat-bench-user" "${baseDir}/dat"
+expect_out "chown" "user:hadoop-compat-bench-user" hadoop fs -stat "user:%u" "${baseDir}/dat"
+
+# 2. chgrp
+hadoop fs -chgrp "hadoop-compat-bench-group" "${baseDir}/dat"
+expect_out "chgrp" "group:hadoop-compat-bench-group" hadoop fs -stat "group:%g" "${baseDir}/dat"
+
+# 3. chmod
+hadoop fs -chmod 777 "${baseDir}/dat"
+expect_out "chmod" "perm:777" hadoop fs -stat "perm:%a" "${baseDir}/dat"
+
+# 4. touch
+hadoop fs -touch -m -t "20000615:000000" "${baseDir}/dat"
+expect_out "touch" "date:2000-06-.*" hadoop fs -stat "date:%y" "${baseDir}/dat"
+
+# 5. 
setfattr +expect_ret "setfattr" 0 hadoop fs -setfattr -n "user.key" -v "value" "${baseDir}/dat" + +# 6. getfattr +expect_out "getfattr" ".*value.*" hadoop fs -getfattr -n "user.key" "${baseDir}/dat" + +# 7. setfacl +expect_ret "setfacl" 0 hadoop fs -setfacl -m "user:foo:---" "${baseDir}/dat" + +# 8. getfacl +expect_out "getfacl" ".*foo.*" hadoop fs -getfacl "${baseDir}/dat" + +# 9. setrep +hadoop fs -setrep 1 "${baseDir}/dat" +expect_out "setrep" "replication:1" hadoop fs -stat "replication:%r" "${baseDir}/dat" + +# 10. checksum +expect_ret "checksum" 0 hadoop fs -checksum "${baseDir}/dat" # TODO diff --git a/hadoop-compat-bench/shell/cases/concat.t b/hadoop-compat-bench/shell/cases/concat.t new file mode 100644 index 0000000000000..22f7cd5c543d7 --- /dev/null +++ b/hadoop-compat-bench/shell/cases/concat.t @@ -0,0 +1,20 @@ +#!/bin/sh +. $(dirname "$0")/../misc.sh + +echo "Hello World!" > "${localDir}/dat" +hadoop fs -put "${localDir}/dat" "${baseDir}/src1" +hadoop fs -put "${localDir}/dat" "${baseDir}/src2" + +echo "1..3" + +# 1. touchz +hadoop fs -touchz "${baseDir}/dat" +expect_out "touchz" "size:0" hadoop fs -stat "size:%b" "${baseDir}/dat" + +# 2. concat +expect_ret "concat" 0 hadoop fs -concat "${baseDir}/dat" "${baseDir}/src1" "${baseDir}/src2" +# expect_out "size:26" hadoop fs -stat "size:%b" "${baseDir}/dat" + +# 3. getmerge +hadoop fs -getmerge "${baseDir}" "${localDir}/merged" +expect_ret "getmerge" 0 test -s "${localDir}/merged" diff --git a/hadoop-compat-bench/shell/cases/copy.t b/hadoop-compat-bench/shell/cases/copy.t new file mode 100644 index 0000000000000..76d246b30b920 --- /dev/null +++ b/hadoop-compat-bench/shell/cases/copy.t @@ -0,0 +1,17 @@ +#!/bin/sh +. $(dirname "$0")/../misc.sh + +echo "Hello World!" > "${localDir}/dat" + +echo "1..3" + +# 1. copyFromLocal +expect_ret "copyFromLocal" 0 hadoop fs -copyFromLocal "${localDir}/dat" "${baseDir}/" + +# 2. cp +hadoop fs -cp "${baseDir}/dat" "${baseDir}/dat2" +expect_ret "cp" 0 hadoop fs -test -f "${baseDir}/dat2" + +# 3. copyToLocal +hadoop fs -copyToLocal "${baseDir}/dat2" "${localDir}/" +expect_ret "copyToLocal" 0 test -f "${localDir}/dat2" diff --git a/hadoop-compat-bench/shell/cases/fileinfo.t b/hadoop-compat-bench/shell/cases/fileinfo.t new file mode 100644 index 0000000000000..ad95d9b194146 --- /dev/null +++ b/hadoop-compat-bench/shell/cases/fileinfo.t @@ -0,0 +1,35 @@ +#!/bin/sh +. $(dirname "$0")/../misc.sh + +echo "Hello World!" > "${localDir}/dat" +hadoop fs -put "${localDir}/dat" "${baseDir}/" +hadoop fs -mkdir -p "${baseDir}/dir/sub" + +echo "1..9" + +# 1. ls +expect_lines "ls" 2 ".*dat.*" ".*dir.*" hadoop fs -ls "${baseDir}" + +# 2. lsr +expect_lines "lsr" 3 ".*dat.*" ".*dir.*" ".*sub.*" hadoop fs -lsr "${baseDir}" + +# 3. count +expect_out "count" ".*13.*" hadoop fs -count "${baseDir}" + +# 4. du +expect_out "du" ".*13.*" hadoop fs -du "${baseDir}" + +# 5. dus +expect_out "dus" ".*13.*" hadoop fs -dus "${baseDir}" + +# 6. df +expect_ret "df" 0 hadoop fs -df "${baseDir}" + +# 7. stat +expect_out "stat" "size:13" hadoop fs -stat "size:%b" "${baseDir}/dat" + +# 8. test +expect_ret "test" 0 hadoop fs -test -f "${baseDir}/dat" + +# 9. find +expect_out "find" ".*dat.*" hadoop fs -find "${baseDir}" -name "dat" -print diff --git a/hadoop-compat-bench/shell/cases/modification.t b/hadoop-compat-bench/shell/cases/modification.t new file mode 100644 index 0000000000000..808fb00823b99 --- /dev/null +++ b/hadoop-compat-bench/shell/cases/modification.t @@ -0,0 +1,18 @@ +#!/bin/sh +. 
$(dirname "$0")/../misc.sh + +echo "Hello World!" > "${localDir}/dat" + +echo "1..4" + +# 1. mkdir +expect_ret "mkdir" 0 hadoop fs -mkdir -p "${baseDir}/dir" + +# 2. put +expect_ret "put" 0 hadoop fs -put "${localDir}/dat" "${baseDir}/" + +# 3. appendToFile +expect_ret "appendToFile" 0 hadoop fs -appendToFile "${localDir}/dat" "${baseDir}/dat" + +# 4. truncate +expect_ret "truncate" 0 hadoop fs -truncate 13 "${baseDir}/dat" diff --git a/hadoop-compat-bench/shell/cases/move.t b/hadoop-compat-bench/shell/cases/move.t new file mode 100644 index 0000000000000..3dc4029f9c235 --- /dev/null +++ b/hadoop-compat-bench/shell/cases/move.t @@ -0,0 +1,17 @@ +#!/bin/sh +. $(dirname "$0")/../misc.sh + +echo "Hello World!" > "${localDir}/dat" + +echo "1..2" + +# 1. moveFromLocal +expect_ret "moveFromLocal" 0 hadoop fs -moveFromLocal "${localDir}/dat" "${baseDir}/" + +# 2. mv +hadoop fs -mv "${baseDir}/dat" "${baseDir}/dat2" +expect_ret "mv" 0 hadoop fs -test -f "${baseDir}/dat2" + +# moveToLocal is not achieved on HDFS +# hadoop fs -moveToLocal "${baseDir}/dat2" "${localDir}/" +# expect_ret "moveToLocal" 0 test -f "${localDir}/dat2" diff --git a/hadoop-compat-bench/shell/cases/read.t b/hadoop-compat-bench/shell/cases/read.t new file mode 100644 index 0000000000000..cbe9124f94137 --- /dev/null +++ b/hadoop-compat-bench/shell/cases/read.t @@ -0,0 +1,23 @@ +#!/bin/sh +. $(dirname "$0")/../misc.sh + +echo "Hello World!" > "${localDir}/dat" +hadoop fs -put "${localDir}/dat" "${baseDir}/" + +echo "1..5" + +# 1. get +hadoop fs -get "${baseDir}/dat" "${localDir}/" +expect_ret "get" 0 test -f "${localDir}/dat" + +# 2. cat +expect_out "cat" "Hello World!" hadoop fs -cat "${baseDir}/dat" + +# 3. text +expect_out "text" "Hello World!" hadoop fs -text "${baseDir}/dat" + +# 4. head +expect_out "head" "Hello World!" hadoop fs -head "${baseDir}/dat" + +# 5. tail +expect_out "tail" "Hello World!" hadoop fs -tail "${baseDir}/dat" diff --git a/hadoop-compat-bench/shell/cases/remove.t b/hadoop-compat-bench/shell/cases/remove.t new file mode 100644 index 0000000000000..11699b392ea78 --- /dev/null +++ b/hadoop-compat-bench/shell/cases/remove.t @@ -0,0 +1,24 @@ +#!/bin/sh +. $(dirname "$0")/../misc.sh + +echo "Hello World!" > "${localDir}/dat" +hadoop fs -mkdir -p "${baseDir}/dir/sub" +hadoop fs -put "${localDir}/dat" "${baseDir}/dir/" +hadoop fs -put "${localDir}/dat" "${baseDir}/dir/sub/" + +echo "1..4" + +# 1. rm +hadoop fs -rm -f -skipTrash "${baseDir}/dir/dat" +expect_ret "rm" 1 hadoop fs -test -e "${baseDir}/dir/dat" + +# 2. rmr +hadoop fs -rmr "${baseDir}/dir/sub" +expect_ret "rmr" 1 hadoop fs -test -e "${baseDir}/dir/sub" + +# 3. rmdir +hadoop fs -rmdir "${baseDir}/dir" +expect_ret "rmdir" 1 hadoop fs -test -e "${baseDir}/dir" + +# 4. expunge +expect_ret "expunge" 0 hadoop fs -expunge -immediate -fs "${baseDir}" diff --git a/hadoop-compat-bench/shell/cases/snapshot.t b/hadoop-compat-bench/shell/cases/snapshot.t new file mode 100644 index 0000000000000..92a876cabfc26 --- /dev/null +++ b/hadoop-compat-bench/shell/cases/snapshot.t @@ -0,0 +1,13 @@ +#!/bin/sh +. $(dirname "$0")/../misc.sh + +echo "1..3" + +# 1. createSnapshot +expect_out "createSnapshot" "Created snapshot .*" hdfs dfs -createSnapshot "${snapshotDir}" "s-name" + +# 2. renameSnapshot +expect_ret "renameSnapshot" 0 hdfs dfs -renameSnapshot "${snapshotDir}" "s-name" "d-name" + +# 3. 
deleteSnapshot +expect_ret "deleteSnapshot" 0 hdfs dfs -deleteSnapshot "${snapshotDir}" "d-name" diff --git a/hadoop-compat-bench/shell/cases/storagePolicy.t b/hadoop-compat-bench/shell/cases/storagePolicy.t new file mode 100644 index 0000000000000..16b00f90d19c5 --- /dev/null +++ b/hadoop-compat-bench/shell/cases/storagePolicy.t @@ -0,0 +1,22 @@ +#!/bin/sh +. $(dirname "$0")/../misc.sh + +echo "Hello World!" > "${localDir}/dat" +hadoop fs -put "${localDir}/dat" "${baseDir}/" + +echo "1..5" + +# 1. listPolicies +expect_ret "listPolicies" 0 hdfs storagepolicies -Dfs.defaultFS="${baseDir}" -listPolicies + +# 2. setStoragePolicy +expect_out "setStoragePolicy" "Set storage policy ${storagePolicy} .*" hdfs storagepolicies -setStoragePolicy -path "${baseDir}" -policy "${storagePolicy}" + +# 3. getStoragePolicy +expect_out "getStoragePolicy" ".*${storagePolicy}.*" hdfs storagepolicies -getStoragePolicy -path "${baseDir}" + +# 4. satisfyStoragePolicy +expect_out "satisfyStoragePolicy" "Scheduled blocks to move .*" hdfs storagepolicies -satisfyStoragePolicy -path "${baseDir}" + +# 5. unsetStoragePolicy +expect_out "unsetStoragePolicy" "Unset storage policy .*" hdfs storagepolicies -unsetStoragePolicy -path "${baseDir}" diff --git a/hadoop-compat-bench/shell/misc.sh b/hadoop-compat-bench/shell/misc.sh new file mode 100644 index 0000000000000..8c2a27dd199e4 --- /dev/null +++ b/hadoop-compat-bench/shell/misc.sh @@ -0,0 +1,154 @@ +#!/bin/sh + +ntest=1 +fname="$0" + +prepare() { + BASE_URI="${HADOOP_COMPAT_BASE_URI}" + LOCAL_URI="${HADOOP_COMPAT_LOCAL_URI}" + SNAPSHOT_URI="${HADOOP_COMPAT_SNAPSHOT_URI}" + STORAGE_POLICY="${HADOOP_COMPAT_STORAGE_POLICY}" + STDOUT_DIR="${HADOOP_COMPAT_STDOUT_DIR}" + PASS_FILE="${HADOOP_COMPAT_PASS_FILE}" + FAIL_FILE="${HADOOP_COMPAT_FAIL_FILE}" + SKIP_FILE="${HADOOP_COMPAT_SKIP_FILE}" + + baseDir="${BASE_URI}/${fname}" + localDir="${LOCAL_URI}/${fname}" + snapshotDir="${SNAPSHOT_URI}" + storagePolicy="${STORAGE_POLICY}" + stdoutDir="${STDOUT_DIR}/${fname}/stdout" + stderrDir="${STDOUT_DIR}/${fname}/stderr" + mkdir -p "${stdoutDir}" + mkdir -p "${stderrDir}" + mkdir -p "${localDir}" + hadoop fs -mkdir -p "${baseDir}" +} + +expect_ret() { ( + cname="${1}" + shift + expect="${1}" + shift + + stdout="${stdoutDir}/${ntest}" + stderr="${stderrDir}/${ntest}" + "$@" 1>"${stdout}" 2>"${stderr}" + result="$?" + + if should_skip "${stderr}"; then + skip_case "${cname}" + else + if [ X"${result}" = X"${expect}" ]; then + pass_case "${cname}" + else + fail_case "${cname}" + fi + fi +) + ntest=$((ntest + 1)) +} + +expect_out() { ( + cname="${1}" + shift + expect="${1}" + shift + + stdout="${stdoutDir}/${ntest}" + stderr="${stderrDir}/${ntest}" + "$@" 1>"${stdout}" 2>"${stderr}" + + if should_skip "${stderr}"; then + skip_case "${cname}" + else + if grep -Eq '^'"${expect}"'$' "${stdout}"; then + pass_case "${cname}" + else + fail_case "${cname}" + fi + fi +) + ntest=$((ntest + 1)) +} + +expect_lines() { ( + cname="${1}" + shift + lineNum="${1}" + shift + lines=$(expect_lines_parse "${lineNum}" "$@") + shift "${lineNum}" + + stdout="${stdoutDir}/${ntest}" + stderr="${stderrDir}/${ntest}" + "$@" 1>"${stdout}" 2>"${stderr}" + + if should_skip "${stderr}"; then + skip_case "${cname}" + else + set -- ${lines} + lineCount="0" + while read -r line; do + case "${line}" in + *"Found"*"items"*) + continue + ;; + esac + if ! 
echo "${line}" | grep -Eq '^'"${1}"'$'; then + lineCount="-1" + break + else + lineCount=$((lineCount + 1)) + shift + fi + done <"${stdout}" + if [ "${lineCount}" -eq "${lineNum}" ]; then + pass_case "${cname}" + else + fail_case "${cname}" + fi + fi +) + ntest=$((ntest + 1)) +} + +expect_lines_parse() { + for _ in $(seq 1 "${1}"); do + shift + echo "${1}" + done +} + +is_hadoop_shell() { + if [ X"${1}" = X"hadoop" ] || [ X"${1}" = X"hdfs" ]; then + return 0 + else + return 1 + fi +} + +should_skip() { + if grep -q "Unknown command" "${1}" || grep -q "Illegal option" "${1}"; then + return 0 + else + return 1 + fi +} + +pass_case() { + echo "ok ${ntest}" + echo "${fname} - #${ntest} ${1}" >> "${PASS_FILE}" +} + +fail_case() { + echo "not ok ${ntest}" + echo "${fname} - #${ntest} ${1}" >> "${FAIL_FILE}" +} + +skip_case() { + echo "ok ${ntest}" + echo "${fname} - #${ntest} ${1}" >> "${SKIP_FILE}" +} + +prepare diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/AbstractHdfsCompatCase.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/AbstractHdfsCompatCase.java new file mode 100644 index 0000000000000..eced86144d6aa --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/AbstractHdfsCompatCase.java @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat; + + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; + +import java.util.Random; + +public abstract class AbstractHdfsCompatCase { + private FileSystem fs; + private HdfsCompatEnvironment env; + private Path localPath; + + public AbstractHdfsCompatCase() { + } + + public void init(HdfsCompatEnvironment env) { + this.fs = env.getFileSystem(); + this.env = env; + LocalFileSystem localFs = env.getLocalFileSystem(); + this.localPath = localFs.makeQualified(new Path(env.getLocalTmpDir())); + } + + public FileSystem fs() { + return fs; + } + + public Path getRootPath() { + return this.env.getRoot(); + } + + public Path getBasePath() { + return this.env.getBase(); + } + + public Path getUniquePath() { + return getUniquePath(getBasePath()); + } + + public static Path getUniquePath(Path basePath) { + return new Path(basePath, System.currentTimeMillis() + + "_" + new Random().nextLong()); + } + + public Path makePath(String name) { + return new Path(getUniquePath(), name); + } + + public Path getLocalPath() { + return localPath; + } + + public String getPrivilegedUser() { + return this.env.getPrivilegedUser(); + } + + public String[] getStoragePolicyNames() { + return this.env.getStoragePolicyNames(); + } + + public String getDelegationTokenRenewer() { + return this.env.getDelegationTokenRenewer(); + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatApiScope.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatApiScope.java new file mode 100644 index 0000000000000..4e57e22854484 --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatApiScope.java @@ -0,0 +1,352 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat; + + +import org.apache.hadoop.classification.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + + +public class HdfsCompatApiScope { + static final boolean skipNoSuchMethodError = true; + private static final Logger LOG = + LoggerFactory.getLogger(HdfsCompatApiScope.class); + + private final HdfsCompatEnvironment env; + private final HdfsCompatSuite suite; + + public HdfsCompatApiScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) { + this.env = env; + this.suite = suite; + } + + public HdfsCompatReport apply() { + List groups = collectGroup(); + HdfsCompatReport report = new HdfsCompatReport(); + for (GroupedCase group : groups) { + if (group.methods.isEmpty()) { + continue; + } + final AbstractHdfsCompatCase obj = group.obj; + GroupedResult groupedResult = new GroupedResult(obj, group.methods); + + { // SetUp + groupedResult.setUp = test(group.setUp, obj); + } + if (groupedResult.setUp == Result.OK) { + for (Method method : group.methods) { + CaseResult caseResult = new CaseResult(); + { // Prepare + caseResult.prepareResult = test(group.prepare, obj); + } + if (caseResult.prepareResult == Result.OK) { // Case + caseResult.methodResult = test(method, obj); + } + { // Cleanup + caseResult.cleanupResult = test(group.cleanup, obj); + } + groupedResult.results.put(getCaseName(method), caseResult); + } + } + { // TearDown + groupedResult.tearDown = test(group.tearDown, obj); + } + groupedResult.exportTo(report); + } + return report; + } + + private Result test(Method method, AbstractHdfsCompatCase obj) { + if (method == null) { // Empty method, just OK. 
+      return Result.OK;
+    }
+    try {
+      method.invoke(obj);
+      return Result.OK;
+    } catch (InvocationTargetException t) {
+      Throwable e = t.getCause();
+      if (skipNoSuchMethodError && (e instanceof NoSuchMethodError)) {
+        LOG.warn("Case skipped with method " + method.getName() +
+            " of class " + obj.getClass(), e);
+        return Result.SKIP;
+      } else {
+        LOG.warn("Case failed with method " + method.getName() +
+            " of class " + obj.getClass(), e);
+        return Result.ERROR;
+      }
+    } catch (ReflectiveOperationException e) {
+      LOG.error("Illegal Compatibility Case method " + method.getName() +
+          " of class " + obj.getClass(), e);
+      throw new HdfsCompatIllegalCaseException(e.getMessage());
+    }
+  }
+
+  private List<GroupedCase> collectGroup() {
+    Class<? extends AbstractHdfsCompatCase>[] cases = suite.getApiCases();
+    List<GroupedCase> groups = new ArrayList<>();
+    for (Class<? extends AbstractHdfsCompatCase> cls : cases) {
+      try {
+        groups.add(GroupedCase.parse(cls, this.env));
+      } catch (ReflectiveOperationException e) {
+        LOG.error("Illegal Compatibility Group " + cls.getName(), e);
+        throw new HdfsCompatIllegalCaseException(e.getMessage());
+      }
+    }
+    return groups;
+  }
+
+  private static String getCaseName(Method caseMethod) {
+    HdfsCompatCase annotation = caseMethod.getAnnotation(HdfsCompatCase.class);
+    assert (annotation != null);
+    if (annotation.brief().isEmpty()) {
+      return caseMethod.getName();
+    } else {
+      return caseMethod.getName() + " (" + annotation.brief() + ")";
+    }
+  }
+
+  @VisibleForTesting
+  public static Set<String> getPublicInterfaces(Class<?> cls) {
+    Method[] methods = cls.getDeclaredMethods();
+    Set<String> publicMethodNames = new HashSet<>();
+    for (Method method : methods) {
+      int modifiers = method.getModifiers();
+      if (Modifier.isPublic(modifiers) && !Modifier.isStatic(modifiers)) {
+        publicMethodNames.add(method.getName());
+      }
+    }
+    publicMethodNames.remove(cls.getSimpleName());
+    publicMethodNames.remove("toString");
+    return publicMethodNames;
+  }
+
+  private static class GroupedCase {
+    private static final Map<String, Set<String>> definedMethods = new HashMap<>();
+    private final AbstractHdfsCompatCase obj;
+    private final List<Method> methods;
+    private final Method setUp;
+    private final Method tearDown;
+    private final Method prepare;
+    private final Method cleanup;
+
+    private GroupedCase(AbstractHdfsCompatCase obj, List<Method> methods,
+                        Method setUp, Method tearDown,
+                        Method prepare, Method cleanup) {
+      this.obj = obj;
+      this.methods = methods;
+      this.setUp = setUp;
+      this.tearDown = tearDown;
+      this.prepare = prepare;
+      this.cleanup = cleanup;
+    }
+
+    private static GroupedCase parse(Class<? extends AbstractHdfsCompatCase> cls,
+                                     HdfsCompatEnvironment env)
+        throws ReflectiveOperationException {
+      Constructor<? extends AbstractHdfsCompatCase> ctor = cls.getConstructor();
+      ctor.setAccessible(true);
+      AbstractHdfsCompatCase caseObj = ctor.newInstance();
+      caseObj.init(env);
+      Method[] declaredMethods = caseObj.getClass().getDeclaredMethods();
+      List<Method> caseMethods = new ArrayList<>();
+      Method setUp = null;
+      Method tearDown = null;
+      Method prepare = null;
+      Method cleanup = null;
+      for (Method method : declaredMethods) {
+        if (method.isAnnotationPresent(HdfsCompatCase.class)) {
+          if (method.isAnnotationPresent(HdfsCompatCaseSetUp.class) ||
+              method.isAnnotationPresent(HdfsCompatCaseTearDown.class) ||
+              method.isAnnotationPresent(HdfsCompatCasePrepare.class) ||
+              method.isAnnotationPresent(HdfsCompatCaseCleanup.class)) {
+            throw new HdfsCompatIllegalCaseException(
+                "Compatibility Case must not be annotated by" +
+                " Prepare/Cleanup or SetUp/TearDown");
+          }
+          HdfsCompatCase annotation = method.getAnnotation(HdfsCompatCase.class);
+          if (annotation.ifDef().isEmpty()) {
+            caseMethods.add(method);
+          } else {
+            String[] requireDefined = annotation.ifDef().split(",");
+            if (Arrays.stream(requireDefined).allMatch(GroupedCase::checkDefined)) {
+              caseMethods.add(method);
+            }
+          }
+        } else {
+          if (method.isAnnotationPresent(HdfsCompatCaseSetUp.class)) {
+            if (setUp != null) {
+              throw new HdfsCompatIllegalCaseException(
+                  "Duplicate SetUp method in Compatibility Case");
+            }
+            setUp = method;
+          }
+          if (method.isAnnotationPresent(HdfsCompatCaseTearDown.class)) {
+            if (tearDown != null) {
+              throw new HdfsCompatIllegalCaseException(
+                  "Duplicate TearDown method in Compatibility Case");
+            }
+            tearDown = method;
+          }
+          if (method.isAnnotationPresent(HdfsCompatCasePrepare.class)) {
+            if (prepare != null) {
+              throw new HdfsCompatIllegalCaseException(
+                  "Duplicate Prepare method in Compatibility Case");
+            }
+            prepare = method;
+          }
+          if (method.isAnnotationPresent(HdfsCompatCaseCleanup.class)) {
+            if (cleanup != null) {
+              throw new HdfsCompatIllegalCaseException(
+                  "Duplicate Cleanup method in Compatibility Case");
+            }
+            cleanup = method;
+          }
+        }
+      }
+      return new GroupedCase(caseObj, caseMethods,
+          setUp, tearDown, prepare, cleanup);
+    }
+
+    private static synchronized boolean checkDefined(String ifDef) {
+      String[] classAndMethod = ifDef.split("#", 2);
+      if (classAndMethod.length < 2) {
+        throw new HdfsCompatIllegalCaseException(
+            "ifDef must be with format className#methodName");
+      }
+      final String className = classAndMethod[0];
+      final String methodName = classAndMethod[1];
+      Set<String> methods = definedMethods.getOrDefault(className, null);
+      if (methods != null) {
+        return methods.contains(methodName);
+      }
+      Class<?> cls;
+      try {
+        cls = Class.forName(className);
+      } catch (ClassNotFoundException e) {
+        throw new HdfsCompatIllegalCaseException(e.getMessage());
+      }
+      methods = getPublicInterfaces(cls);
+      definedMethods.put(className, methods);
+      return methods.contains(methodName);
+    }
+  }
+
+  private static class GroupedResult {
+    private static final int commonPrefixLength = AbstractHdfsCompatCase.class
+        .getPackage().getName().length() + ".cases.".length();
+    private final String prefix;
+    private Result setUp;
+    private Result tearDown;
+    private final LinkedHashMap<String, CaseResult> results;
+
+    private GroupedResult(AbstractHdfsCompatCase obj, List<Method> methods) {
+      this.prefix = getNamePrefix(obj.getClass());
+      this.results = new LinkedHashMap<>();
+      for (Method method : methods) {
+        this.results.put(getCaseName(method), new CaseResult());
+      }
+    }
+
+    private void exportTo(HdfsCompatReport report) {
+      if (this.setUp == Result.SKIP) {
+        List<String> cases = results.keySet().stream().map(m -> prefix + m)
+            .collect(Collectors.toList());
+        report.addSkippedCase(cases);
+        return;
+      }
+      if ((this.setUp == Result.ERROR) || (this.tearDown == Result.ERROR)) {
+        List<String> cases = results.keySet().stream().map(m -> prefix + m)
+            .collect(Collectors.toList());
+        report.addFailedCase(cases);
+        return;
+      }
+
+      List<String> passed = new ArrayList<>();
+      List<String> failed = new ArrayList<>();
+      List<String> skipped = new ArrayList<>();
+      for (Map.Entry<String, CaseResult> entry : results.entrySet()) {
+        final String caseName = prefix + entry.getKey();
+        CaseResult result = entry.getValue();
+        if (result.prepareResult == Result.SKIP) {
+          skipped.add(caseName);
+          continue;
+        }
+        if ((result.prepareResult == Result.ERROR) ||
+            (result.cleanupResult == Result.ERROR) ||
+            (result.methodResult == Result.ERROR)) {
+          failed.add(caseName);
+        } else if (result.methodResult == Result.OK) {
+          passed.add(caseName);
+        } else {
+          skipped.add(caseName);
+        }
+      }
+
+      if (!passed.isEmpty()) {
+        report.addPassedCase(passed);
+      }
+      if (!failed.isEmpty()) {
+        report.addFailedCase(failed);
+      }
+      if (!skipped.isEmpty()) {
+        report.addSkippedCase(skipped);
+      }
+    }
+
+    private static String getNamePrefix(Class<? extends AbstractHdfsCompatCase> cls) {
+      return (cls.getPackage().getName() + ".").substring(commonPrefixLength) +
+          getGroupName(cls) + ".";
+    }
+
+    private static String getGroupName(Class<? extends AbstractHdfsCompatCase> cls) {
+      if (cls.isAnnotationPresent(HdfsCompatCaseGroup.class)) {
+        HdfsCompatCaseGroup annotation = cls.getAnnotation(HdfsCompatCaseGroup.class);
+        if (!annotation.name().isEmpty()) {
+          return annotation.name();
+        }
+      }
+      return cls.getSimpleName();
+    }
+  }
+
+  private static class CaseResult {
+    private Result prepareResult = Result.SKIP;
+    private Result cleanupResult = Result.SKIP;
+    private Result methodResult = Result.SKIP;
+  }
+
+  private enum Result {
+    OK,
+    ERROR,
+    SKIP,
+  }
+}
\ No newline at end of file
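For illustration, a case group written against this framework might look like the following
sketch. The class itself is hypothetical; the annotations, `fs()` and `makePath()` are the ones
introduced in this patch, and each `@HdfsCompatCase` method signals failure by throwing (a
`NoSuchMethodError` from a missing API is recorded as SKIP rather than ERROR, as `test()` above
shows):

```java
package org.apache.hadoop.compat.cases.function;

import java.io.IOException;

import org.apache.hadoop.compat.AbstractHdfsCompatCase;
import org.apache.hadoop.compat.HdfsCompatCase;
import org.apache.hadoop.compat.HdfsCompatCaseCleanup;
import org.apache.hadoop.compat.HdfsCompatCaseGroup;
import org.apache.hadoop.compat.HdfsCompatCasePrepare;
import org.apache.hadoop.fs.Path;

@HdfsCompatCaseGroup(name = "Example")
public class HdfsCompatExample extends AbstractHdfsCompatCase {  // hypothetical class
  private Path dir;

  @HdfsCompatCasePrepare
  public void prepare() throws IOException {
    this.dir = makePath("example");  // fresh unique path before each case
    fs().mkdirs(this.dir);
  }

  @HdfsCompatCase(brief = "directory visible after mkdirs")
  public void mkdirs() throws IOException {
    if (!fs().exists(this.dir)) {
      throw new IOException("dir not visible: " + this.dir);  // recorded as ERROR
    }
  }

  // Included only if the named class declares createSnapshot (ifDef check above).
  @HdfsCompatCase(ifDef = "org.apache.hadoop.fs.FileSystem#createSnapshot")
  public void snapshot() throws IOException {
    fs().createSnapshot(this.dir, "s0");
    fs().deleteSnapshot(this.dir, "s0");
  }

  @HdfsCompatCaseCleanup
  public void cleanup() throws IOException {
    fs().delete(this.dir, true);
  }
}
```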
diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCase.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCase.java
new file mode 100644
index 0000000000000..c62bf6295947d
--- /dev/null
+++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCase.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.METHOD}) +public @interface HdfsCompatCase { + String brief() default ""; + + String ifDef() default ""; +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseCleanup.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseCleanup.java new file mode 100644 index 0000000000000..a6af788820a12 --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseCleanup.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.METHOD}) +public @interface HdfsCompatCaseCleanup { +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseGroup.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseGroup.java new file mode 100644 index 0000000000000..53554b0890763 --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseGroup.java @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.TYPE}) +public @interface HdfsCompatCaseGroup { + String name() default ""; +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCasePrepare.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCasePrepare.java new file mode 100644 index 0000000000000..eff03f6414c2b --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCasePrepare.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.METHOD}) +public @interface HdfsCompatCasePrepare { +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseSetUp.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseSetUp.java new file mode 100644 index 0000000000000..a80f8595fcb9c --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseSetUp.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.METHOD}) +public @interface HdfsCompatCaseSetUp { +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseTearDown.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseTearDown.java new file mode 100644 index 0000000000000..0282958431a73 --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseTearDown.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.METHOD}) +public @interface HdfsCompatCaseTearDown { +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCommand.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCommand.java new file mode 100644 index 0000000000000..08b40c74ce54f --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCommand.java @@ -0,0 +1,127 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat; + +import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.compat.suites.HdfsCompatSuiteForAll; +import org.apache.hadoop.compat.suites.HdfsCompatSuiteForShell; +import org.apache.hadoop.compat.suites.HdfsCompatSuiteForTpcds; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; + +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.util.HashMap; +import java.util.Map; + +public class HdfsCompatCommand { + private final Path uri; + private final String suiteName; + private final Configuration conf; + private HdfsCompatSuite suite; + private HdfsCompatApiScope api; + private HdfsCompatShellScope shell; + + public HdfsCompatCommand(String uri, String suiteName, Configuration conf) { + this.uri = new Path(uri); + this.suiteName = suiteName.toLowerCase(); + this.conf = conf; + } + + public void initialize() throws ReflectiveOperationException, IOException { + initSuite(); + HdfsCompatEnvironment env = new HdfsCompatEnvironment(uri, conf); + env.init(); + if (hasApiCase()) { + api = new HdfsCompatApiScope(env, suite); + } + if (hasShellCase()) { + shell = new HdfsCompatShellScope(env, suite); + } + } + + public HdfsCompatReport apply() throws Exception { + HdfsCompatReport report = new HdfsCompatReport(uri.toString(), suite); + if (api != null) { + report.merge(api.apply()); + } + if (shell != null) { + report.merge(shell.apply()); + } + return report; + } + + private void initSuite() throws ReflectiveOperationException { + Map defaultSuites = getDefaultSuites(); + this.suite = defaultSuites.getOrDefault(this.suiteName, null); + if (this.suite != null) { + return; + } + String key = "hadoop.compatibility.suite." 
+        this.suiteName + ".classname";
+    final String suiteClassName = conf.get(key, null);
+    if ((suiteClassName == null) || suiteClassName.isEmpty()) {
+      throw new HdfsCompatIllegalArgumentException(
+          "cannot get class name for suite " + this.suiteName +
+          ", configuration " + key + " is not properly set.");
+    }
+    Constructor<?> ctor = Class.forName(suiteClassName).getConstructor();
+    ctor.setAccessible(true);
+    Object suiteObj = ctor.newInstance();
+    if (suiteObj instanceof HdfsCompatSuite) {
+      this.suite = (HdfsCompatSuite) suiteObj;
+    } else {
+      throw new HdfsCompatIllegalArgumentException(
+          "class name " + suiteClassName + " must be an" +
+          " implementation of " + HdfsCompatSuite.class.getName());
+    }
+    if (suite.getSuiteName() == null || suite.getSuiteName().isEmpty()) {
+      throw new HdfsCompatIllegalArgumentException(
+          "suite " + suiteClassName + " suiteName is empty");
+    }
+    for (HdfsCompatSuite defaultSuite : defaultSuites.values()) {
+      if (suite.getSuiteName().equalsIgnoreCase(defaultSuite.getSuiteName())) {
+        throw new HdfsCompatIllegalArgumentException(
+            "suite " + suiteClassName + " suiteName" +
+            " conflicts with default suite " + defaultSuite.getSuiteName());
+      }
+    }
+    if (!hasApiCase() && !hasShellCase()) {
+      throw new HdfsCompatIllegalArgumentException(
+          "suite " + suiteClassName + " is empty for both API and SHELL");
+    }
+  }
+
+  private boolean hasApiCase() {
+    return (suite.getApiCases() != null) &&
+        (suite.getApiCases().length > 0);
+  }
+
+  private boolean hasShellCase() {
+    return (suite.getShellCases() != null) &&
+        (suite.getShellCases().length > 0);
+  }
+
+  @VisibleForTesting
+  protected Map<String, HdfsCompatSuite> getDefaultSuites() {
+    Map<String, HdfsCompatSuite> defaultSuites = new HashMap<>();
+    defaultSuites.put("all", new HdfsCompatSuiteForAll());
+    defaultSuites.put("shell", new HdfsCompatSuiteForShell());
+    defaultSuites.put("tpcds", new HdfsCompatSuiteForTpcds());
+    return defaultSuites;
+  }
+}
\ No newline at end of file
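As initSuite() above shows, suites beyond the built-in all/shell/tpcds can be plugged in through
configuration. A hypothetical custom suite might look as follows (the HdfsCompatSuite method
signatures are inferred from their uses in this class, and HdfsCompatAcl is one of the case
groups added by this patch):

```java
package org.apache.hadoop.compat.suites;  // illustrative location

import org.apache.hadoop.compat.AbstractHdfsCompatCase;
import org.apache.hadoop.compat.HdfsCompatSuite;
import org.apache.hadoop.compat.cases.function.HdfsCompatAcl;

public class HdfsCompatSuiteForAcl implements HdfsCompatSuite {
  @Override
  public String getSuiteName() {
    return "acl";  // must not collide with the default all/shell/tpcds names
  }

  @Override
  @SuppressWarnings("unchecked")
  public Class<? extends AbstractHdfsCompatCase>[] getApiCases() {
    return new Class[]{HdfsCompatAcl.class};
  }

  @Override
  public String[] getShellCases() {
    return new String[0];  // API cases only; entries here would name shell scripts
  }
}
```

It would then be enabled by setting hadoop.compatibility.suite.acl.classname to the class name
above and selecting the suite name "acl" (HdfsCompatCommand lowercases the name before the lookup).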
diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatEnvironment.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatEnvironment.java
new file mode 100644
index 0000000000000..3251bac5868e7
--- /dev/null
+++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatEnvironment.java
@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat; + + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.BlockStoragePolicySpi; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.text.SimpleDateFormat; +import java.util.Arrays; +import java.util.Collection; +import java.util.Date; +import java.util.List; +import java.util.Random; +import java.util.UUID; +import java.util.stream.Collectors; + +public class HdfsCompatEnvironment { + private static final SimpleDateFormat dateFormat = + new SimpleDateFormat("yyyy_MM_dd_HH_mm_ss"); + private static final Logger LOG = + LoggerFactory.getLogger(HdfsCompatEnvironment.class); + private final Path uri; + private final Configuration conf; + private FileSystem fs; + private LocalFileSystem localFs; + private Path rootDir; + private Path baseDir; + private String defaultLocalDir; + private String[] defaultStoragePolicyNames; + + public HdfsCompatEnvironment(Path uri, Configuration conf) { + this.conf = conf; + this.uri = uri; + } + + public void init() throws IOException { + Date now = new Date(); + String uuid = UUID.randomUUID().toString(); + String uniqueDir = "hadoop-compatibility-benchmark/" + + dateFormat.format(now) + "/" + uuid; + + this.fs = uri.getFileSystem(conf); + this.localFs = FileSystem.getLocal(conf); + this.rootDir = fs.makeQualified(new Path("/")); + this.baseDir = fs.makeQualified(new Path(uri, uniqueDir)); + String tmpdir = getEnvTmpDir(); + if ((tmpdir == null) || tmpdir.isEmpty()) { + LOG.warn("Cannot get valid io.tmpdir, will use /tmp"); + tmpdir = "/tmp"; + } + this.defaultLocalDir = new File(tmpdir, uniqueDir).getAbsolutePath(); + this.defaultStoragePolicyNames = getDefaultStoragePolicyNames(); + } + + public FileSystem getFileSystem() { + return fs; + } + + public LocalFileSystem getLocalFileSystem() { + return localFs; + } + + public Path getRoot() { + return rootDir; + } + + public Path getBase() { + return baseDir; + } + + public String getLocalTmpDir() { + final String scheme = this.uri.toUri().getScheme(); + final String key = "fs." + scheme + ".compatibility.local.tmpdir"; + final String localDir = conf.get(key, null); + return (localDir != null) ? localDir : defaultLocalDir; + } + + public String getPrivilegedUser() { + final String scheme = this.uri.toUri().getScheme(); + final String key = "fs." + scheme + ".compatibility.privileged.user"; + final String privileged = conf.get(key, null); + return (privileged != null) ? privileged : + conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY, + DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT); + } + + public String[] getStoragePolicyNames() { + final String scheme = this.uri.toUri().getScheme(); + final String key = "fs." + scheme + ".compatibility.storage.policies"; + final String storagePolicies = conf.get(key, null); + return (storagePolicies != null) ? 
storagePolicies.split(",") : + defaultStoragePolicyNames; + } + + public String getDelegationTokenRenewer() { + final String scheme = this.uri.toUri().getScheme(); + final String key = "fs." + scheme + ".compatibility.delegation.token.renewer"; + return conf.get(key, ""); + } + + private String getEnvTmpDir() { + final String systemDefault = System.getProperty("java.io.tmpdir"); + if ((systemDefault == null) || systemDefault.isEmpty()) { + return null; + } + String[] tmpDirs = systemDefault.split(",|" + File.pathSeparator); + List<String> validDirs = Arrays.stream(tmpDirs).filter( + s -> (s != null && !s.isEmpty()) + ).collect(Collectors.toList()); + if (validDirs.isEmpty()) { + return null; + } + final String tmpDir = validDirs.get( + new Random().nextInt(validDirs.size())); + return new File(tmpDir).getAbsolutePath(); + } + + private String[] getDefaultStoragePolicyNames() { + Collection<? extends BlockStoragePolicySpi> policies = null; + try { + policies = fs.getAllStoragePolicies(); + } catch (Exception e) { + LOG.warn("Cannot get storage policy", e); + } + if ((policies == null) || policies.isEmpty()) { + return new String[]{"Hot"}; + } else { + return policies.stream().map(BlockStoragePolicySpi::getName).toArray(String[]::new); + } + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatIllegalArgumentException.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatIllegalArgumentException.java new file mode 100644 index 0000000000000..e4d18ef954347 --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatIllegalArgumentException.java @@ -0,0 +1,8 @@ +package org.apache.hadoop.compat; + +public class HdfsCompatIllegalArgumentException + extends IllegalArgumentException { + HdfsCompatIllegalArgumentException(String message) { + super(message); + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatIllegalCaseException.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatIllegalCaseException.java new file mode 100644 index 0000000000000..3ecef57f02524 --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatIllegalCaseException.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
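Each knob read by HdfsCompatEnvironment is resolved against the scheme of the target URI, so one configuration can carry settings for several storage systems. A configuration sketch for an s3a-style target (all key values illustrative, statements assumed inside a method):

    Configuration conf = new Configuration();
    // Per-scheme overrides; built-in defaults apply when unset.
    conf.set("fs.s3a.compatibility.local.tmpdir", "/data/tmp/compat");
    conf.set("fs.s3a.compatibility.privileged.user", "hdfs");
    conf.set("fs.s3a.compatibility.storage.policies", "Hot,Cold");  // split on ','
    conf.set("fs.s3a.compatibility.delegation.token.renewer", "yarn");

    HdfsCompatEnvironment env =
        new HdfsCompatEnvironment(new Path("s3a://bucket/compat-test"), conf);
    env.init();  // resolves FileSystem, unique base dir and storage policy names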

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat; + +import org.apache.hadoop.HadoopIllegalArgumentException; + +public class HdfsCompatIllegalCaseException + extends HadoopIllegalArgumentException { + /** + * Constructs exception with the specified detail message. + * @param message detailed message. + */ + public HdfsCompatIllegalCaseException(final String message) { + super(message); + } +} diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatReport.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatReport.java new file mode 100644 index 0000000000000..f7103f88b6ba3 --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatReport.java @@ -0,0 +1,79 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat; + +import java.util.Collection; +import java.util.concurrent.ConcurrentLinkedQueue; + +public class HdfsCompatReport { + private final String uri; + private final HdfsCompatSuite suite; + private final ConcurrentLinkedQueue passed = + new ConcurrentLinkedQueue<>(); + private final ConcurrentLinkedQueue failed = + new ConcurrentLinkedQueue<>(); + private final ConcurrentLinkedQueue skipped = + new ConcurrentLinkedQueue<>(); + + public HdfsCompatReport() { + this(null, null); + } + + public HdfsCompatReport(String uri, HdfsCompatSuite suite) { + this.uri = uri; + this.suite = suite; + } + + public void addPassedCase(Collection cases) { + passed.addAll(cases); + } + + public void addFailedCase(Collection cases) { + failed.addAll(cases); + } + + public void addSkippedCase(Collection cases) { + skipped.addAll(cases); + } + + public void merge(HdfsCompatReport other) { + this.passed.addAll(other.passed); + this.failed.addAll(other.failed); + this.skipped.addAll(other.skipped); + } + + public Collection getPassedCase() { + return passed; + } + + public Collection getFailedCase() { + return failed; + } + + public Collection getSkippedCase() { + return skipped; + } + + public String getUri() { + return this.uri; + } + + public HdfsCompatSuite getSuite() { + return this.suite; + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatShellScope.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatShellScope.java new file mode 100644 index 0000000000000..447fee97008b0 --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatShellScope.java @@ -0,0 +1,403 @@ +package org.apache.hadoop.compat; + + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.fs.BlockStoragePolicySpi; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileOutputStream; +import java.io.FileReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; + + +public class HdfsCompatShellScope { + private static final Logger LOG = + LoggerFactory.getLogger(HdfsCompatShellScope.class); + private final HdfsCompatEnvironment env; + private final HdfsCompatSuite suite; + private File stdoutDir = null; + private File passList = null; + private File failList = null; + private File skipList = null; + private Path snapshotPath = null; + private String storagePolicy = null; + private Method disallowSnapshot = null; + + public HdfsCompatShellScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) { + this.env = env; + this.suite = suite; + } + + 
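HdfsCompatReport is a concurrency-safe accumulator: each scope adds cases from its own threads, and partial reports are folded together with merge(). A usage sketch (the uri string and the suite reference are placeholders):

    HdfsCompatReport shellPart = new HdfsCompatReport();
    shellPart.addPassedCase(java.util.Arrays.asList("read.t", "copy.t"));
    shellPart.addFailedCase(java.util.Collections.singletonList("snapshot.t"));

    // suite: an HdfsCompatSuite instance obtained elsewhere (placeholder).
    HdfsCompatReport total = new HdfsCompatReport("hdfs://ns1/", suite);
    total.merge(shellPart);  // backed by ConcurrentLinkedQueue, safe across threads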
public HdfsCompatReport apply() throws Exception { + File localTmpDir = null; + try { + localTmpDir = new File(this.env.getLocalTmpDir()); + LOG.info("Local tmp dir: " + localTmpDir.getAbsolutePath()); + return runShell(localTmpDir); + } finally { + try { + if (this.disallowSnapshot != null) { + try { + this.disallowSnapshot.invoke(this.env.getFileSystem(), + this.snapshotPath); + } catch (InvocationTargetException e) { + LOG.error("Cannot disallow snapshot", e.getCause()); + } catch (ReflectiveOperationException e) { + LOG.error("Disallow snapshot method is invalid", e); + } + } + } finally { + FileUtils.deleteQuietly(localTmpDir); + } + } + } + + private HdfsCompatReport runShell(File localTmpDir) throws Exception { + File localDir = new File(localTmpDir, "test"); + File scriptDir = new File(localTmpDir, "scripts"); + File confDir = new File(localTmpDir, "hadoop-conf"); + copyScriptsResource(scriptDir); + try { + setShellLogConf(confDir); + } catch (Exception e) { + LOG.error("Cannot set new conf dir", e); + confDir = null; + } + + prepareSnapshot(); + this.storagePolicy = getStoragePolicy(); + String[] confEnv = getEnv(localDir, scriptDir, confDir); + ExecResult result = exec(confEnv, scriptDir); + printLog(result); + return export(); + } + + private void copyScriptsResource(File scriptDir) throws IOException { + Files.createDirectories(new File(scriptDir, "cases").toPath()); + copyResource("/misc.sh", new File(scriptDir, "misc.sh")); + String[] cases = suite.getShellCases(); + for (String res : cases) { + copyResource("/cases/" + res, new File(scriptDir, "cases/" + res)); + } + } + + private void setShellLogConf(File confDir) throws IOException { + final String hadoopHome = System.getenv("HADOOP_HOME"); + final String hadoopConfDir = System.getenv("HADOOP_CONF_DIR"); + if ((hadoopHome == null) || hadoopHome.isEmpty()) { + LOG.error("HADOOP_HOME not configured"); + } + if ((hadoopConfDir == null) || hadoopConfDir.isEmpty()) { + throw new IOException("HADOOP_CONF_DIR not configured"); + } + File srcDir = new File(hadoopConfDir).getAbsoluteFile(); + if (!srcDir.isDirectory()) { + throw new IOException("HADOOP_CONF_DIR is not valid: " + srcDir); + } + + Files.createDirectories(confDir.toPath()); + FileUtils.copyDirectory(srcDir, confDir); + File logConfFile = new File(confDir, "log4j.properties"); + copyResource("/hadoop-compat-bench-log4j.properties", logConfFile, true); + } + + @VisibleForTesting + protected void copyResource(String res, File dst) throws IOException { + copyResource(res, dst, false); + } + + private void copyResource(String res, File dst, boolean overwrite) + throws IOException { + InputStream in = null; + try { + in = this.getClass().getResourceAsStream(res); + if (in == null) { + in = this.suite.getClass().getResourceAsStream(res); + } + if (in == null) { + throw new IOException("Resource not found" + + " during scripts prepare: " + res); + } + + if (dst.exists() && !overwrite) { + throw new IOException("Cannot overwrite existing resource file"); + } + + Files.createDirectories(dst.getParentFile().toPath()); + + byte[] buf = new byte[1024]; + try (OutputStream out = new FileOutputStream(dst)) { + int nRead = in.read(buf); + while (nRead != -1) { + out.write(buf, 0, nRead); + nRead = in.read(buf); + } + } + } finally { + if (in != null) { + in.close(); + } + } + } + + private void prepareSnapshot() { + this.snapshotPath = AbstractHdfsCompatCase.getUniquePath(this.env.getBase()); + Method allowSnapshot = null; + try { + FileSystem fs = this.env.getFileSystem(); + 
fs.mkdirs(snapshotPath); + Method allowSnapshotMethod = fs.getClass() + .getMethod("allowSnapshot", Path.class); + allowSnapshotMethod.setAccessible(true); + allowSnapshotMethod.invoke(fs, snapshotPath); + allowSnapshot = allowSnapshotMethod; + + Method disallowSnapshotMethod = fs.getClass() + .getMethod("disallowSnapshot", Path.class); + disallowSnapshotMethod.setAccessible(true); + this.disallowSnapshot = disallowSnapshotMethod; + } catch (IOException e) { + LOG.error("Cannot prepare snapshot path", e); + } catch (InvocationTargetException e) { + LOG.error("Cannot allow snapshot", e.getCause()); + } catch (ReflectiveOperationException e) { + LOG.warn("Get admin snapshot methods failed."); + } catch (Exception e) { + LOG.warn("Prepare snapshot failed", e); + } + if (allowSnapshot == null) { + LOG.warn("No allowSnapshot method found."); + } + if (this.disallowSnapshot == null) { + LOG.warn("No disallowSnapshot method found."); + } + } + + private String getStoragePolicy() { + BlockStoragePolicySpi def; + String[] policies; + try { + FileSystem fs = this.env.getFileSystem(); + Path base = this.env.getBase(); + fs.mkdirs(base); + def = fs.getStoragePolicy(base); + policies = env.getStoragePolicyNames(); + } catch (Exception e) { + LOG.warn("Cannot get storage policy", e); + return "Hot"; + } + + List differentPolicies = new ArrayList<>(); + for (String policyName : policies) { + if ((def == null) || !policyName.equalsIgnoreCase(def.getName())) { + differentPolicies.add(policyName); + } + } + if (differentPolicies.isEmpty()) { + final String defPolicyName; + if ((def == null) || (def.getName() == null)) { + defPolicyName = "Hot"; + LOG.warn("No valid storage policy name found, use Hot."); + } else { + defPolicyName = def.getName(); + LOG.warn("There is only one storage policy: " + defPolicyName); + } + return defPolicyName; + } else { + return differentPolicies.get( + new Random().nextInt(differentPolicies.size())); + } + } + + @VisibleForTesting + protected String[] getEnv(File localDir, File scriptDir, File confDir) + throws IOException { + List confEnv = new ArrayList<>(); + final Map environments = System.getenv(); + for (Map.Entry entry : environments.entrySet()) { + confEnv.add(entry.getKey() + "=" + entry.getValue()); + } + if (confDir != null) { + confEnv.add("HADOOP_CONF_DIR=" + confDir.getAbsolutePath()); + } + + String timestamp = String.valueOf(System.currentTimeMillis()); + Path baseUri = new Path(this.env.getBase(), timestamp); + File localUri = new File(localDir, timestamp).getAbsoluteFile(); + File resultDir = new File(localDir, timestamp); + Files.createDirectories(resultDir.toPath()); + this.stdoutDir = new File(resultDir, "output").getAbsoluteFile(); + this.passList = new File(resultDir, "passed").getAbsoluteFile(); + this.failList = new File(resultDir, "failed").getAbsoluteFile(); + this.skipList = new File(resultDir, "skipped").getAbsoluteFile(); + Files.createFile(this.passList.toPath()); + Files.createFile(this.failList.toPath()); + Files.createFile(this.skipList.toPath()); + + final String prefix = "HADOOP_COMPAT_"; + confEnv.add(prefix + "BASE_URI=" + baseUri); + confEnv.add(prefix + "LOCAL_URI=" + localUri.getAbsolutePath()); + confEnv.add(prefix + "SNAPSHOT_URI=" + snapshotPath.toString()); + confEnv.add(prefix + "STORAGE_POLICY=" + storagePolicy); + confEnv.add(prefix + "STDOUT_DIR=" + stdoutDir.getAbsolutePath()); + confEnv.add(prefix + "PASS_FILE=" + passList.getAbsolutePath()); + confEnv.add(prefix + "FAIL_FILE=" + failList.getAbsolutePath()); + 
confEnv.add(prefix + "SKIP_FILE=" + skipList.getAbsolutePath()); + return confEnv.toArray(new String[0]); + } + + private ExecResult exec(String[] confEnv, File scriptDir) + throws IOException, InterruptedException { + Process process = Runtime.getRuntime().exec( + "prove -r cases", confEnv, scriptDir); + StreamPrinter out = new StreamPrinter(process.getInputStream()); + StreamPrinter err = new StreamPrinter(process.getErrorStream()); + out.start(); + err.start(); + int code = process.waitFor(); + out.join(); + err.join(); + return new ExecResult(code, out.lines, err.lines); + } + + private void printLog(ExecResult execResult) { + LOG.info("Shell prove\ncode: {}\nstdout:\n\t{}\nstderr:\n\t{}", + execResult.code, String.join("\n\t", execResult.out), + String.join("\n\t", execResult.err)); + File casesRoot = new File(stdoutDir, "cases").getAbsoluteFile(); + String[] casesDirList = casesRoot.list(); + if (casesDirList == null) { + LOG.error("stdout/stderr root directory is invalid: " + casesRoot); + return; + } + Arrays.sort(casesDirList, (o1, o2) -> { + if (o1.length() == o2.length()) { + return o1.compareTo(o2); + } else { + return o1.length() - o2.length(); + } + }); + for (String casesDir : casesDirList) { + printCasesLog(new File(casesRoot, casesDir).getAbsoluteFile()); + } + } + + private void printCasesLog(File casesDir) { + File stdout = new File(casesDir, "stdout").getAbsoluteFile(); + File stderr = new File(casesDir, "stderr").getAbsoluteFile(); + File[] stdoutFiles = stdout.listFiles(); + File[] stderrFiles = stderr.listFiles(); + Set cases = new HashSet<>(); + if (stdoutFiles != null) { + for (File c : stdoutFiles) { + cases.add(c.getName()); + } + } + if (stderrFiles != null) { + for (File c : stderrFiles) { + cases.add(c.getName()); + } + } + String[] caseNames = cases.stream().sorted((o1, o2) -> { + if (o1.length() == o2.length()) { + return o1.compareTo(o2); + } else { + return o1.length() - o2.length(); + } + }).toArray(String[]::new); + for (String caseName : caseNames) { + File stdoutFile = new File(stdout, caseName); + File stderrFile = new File(stderr, caseName); + try { + List stdoutLines = stdoutFile.exists() ? + readLines(stdoutFile) : new ArrayList<>(); + List stderrLines = stderrFile.exists() ? 
+ readLines(stderrFile) : new ArrayList<>(); + LOG.info("Shell case {} - #{}\nstdout:\n\t{}\nstderr:\n\t{}", + casesDir.getName(), caseName, + String.join("\n\t", stdoutLines), + String.join("\n\t", stderrLines)); + } catch (Exception e) { + LOG.warn("Read shell stdout or stderr file failed", e); + } + } + } + + private HdfsCompatReport export() throws IOException { + HdfsCompatReport report = new HdfsCompatReport(); + report.addPassedCase(readLines(this.passList)); + report.addFailedCase(readLines(this.failList)); + report.addSkippedCase(readLines(this.skipList)); + return report; + } + + private List readLines(File file) throws IOException { + List lines = new ArrayList<>(); + try (BufferedReader br = new BufferedReader( + new FileReader(file))) { + String line = br.readLine(); + while (line != null) { + lines.add(line); + line = br.readLine(); + } + } + return lines; + } + + private static class StreamPrinter extends Thread { + private final InputStream in; + private final List lines; + + private StreamPrinter(InputStream in) { + this.in = in; + this.lines = new ArrayList<>(); + } + + @Override + public void run() { + try (BufferedReader br = new BufferedReader( + new InputStreamReader(in, StandardCharsets.UTF_8))) { + String line = br.readLine(); + while (line != null) { + this.lines.add(line); + line = br.readLine(); + } + } catch (IOException e) { + e.printStackTrace(); + } + } + } + + private static class ExecResult { + private final int code; + private final List out; + private final List err; + + private ExecResult(int code, List out, List err) { + this.code = code; + this.out = out; + this.err = err; + } + } +} + diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatSuite.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatSuite.java new file mode 100644 index 0000000000000..03147f51abd9e --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatSuite.java @@ -0,0 +1,10 @@ +package org.apache.hadoop.compat; + + +public interface HdfsCompatSuite { + String getSuiteName(); + + Class[] getApiCases(); + + String[] getShellCases(); +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatUtil.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatUtil.java new file mode 100644 index 0000000000000..a3cd0ec84fcc4 --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatUtil.java @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
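End to end, apply() copies the packaged scripts to a local directory, exports the HADOOP_COMPAT_* variables assembled in getEnv(), runs prove -r cases, and turns the pass/fail/skip files back into a report. Driving the scope directly (env and suite built as above) might look like this sketch:

    HdfsCompatShellScope scope = new HdfsCompatShellScope(env, suite);
    HdfsCompatReport report = scope.apply();  // spawns `prove -r cases`
    System.out.println("shell passed=" + report.getPassedCase().size()
        + " failed=" + report.getFailedCase().size());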

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat; + + +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.IOUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.Random; + +public class HdfsCompatUtil { + private static final Logger LOG = + LoggerFactory.getLogger(HdfsCompatUtil.class); + + public static void checkImplementation(ImplementationFunction func) { + try { + func.apply(); + } catch (UnsupportedOperationException e) { + throw e; + } catch (NoSuchMethodError e) { + if (HdfsCompatApiScope.skipNoSuchMethodError) { + throw e; + } else { + throw new UnsupportedOperationException(e); + } + } catch (Throwable ignored) { + } + } + + public static void createFile(FileSystem fs, Path file, long fileLen) + throws IOException { + createFile(fs, file, true, 1024, fileLen, 1048576L, (short) 1); + } + + public static void createFile(FileSystem fs, Path file, byte[] data) + throws IOException { + createFile(fs, file, true, data, 1048576L, (short) 1); + } + + public static void createFile(FileSystem fs, Path file, boolean overwrite, + int bufferSize, long fileLen, long blockSize, + short replication) throws IOException { + assert (bufferSize > 0); + try (FSDataOutputStream out = fs.create(file, overwrite, + bufferSize, replication, blockSize)) { + if (fileLen > 0) { + byte[] toWrite = new byte[bufferSize]; + Random rb = new Random(); + long bytesToWrite = fileLen; + while (bytesToWrite > 0) { + rb.nextBytes(toWrite); + int bytesToWriteNext = (bufferSize < bytesToWrite) ? + bufferSize : (int) bytesToWrite; + out.write(toWrite, 0, bytesToWriteNext); + bytesToWrite -= bytesToWriteNext; + } + } + } + } + + public static void createFile(FileSystem fs, Path file, boolean overwrite, + byte[] data, long blockSize, + short replication) throws IOException { + try (FSDataOutputStream out = fs.create(file, overwrite, + (data.length > 0) ? 
data.length : 1024, replication, blockSize)) { + if (data.length > 0) { + out.write(data); + } + } + } + + public static byte[] readFileBuffer(FileSystem fs, Path fileName) + throws IOException { + try (ByteArrayOutputStream os = new ByteArrayOutputStream(); + FSDataInputStream in = fs.open(fileName)) { + IOUtils.copyBytes(in, os, 1024, true); + return os.toByteArray(); + } + } + + public static void deleteQuietly(FileSystem fs, Path path, + boolean recursive) { + if (fs != null && path != null) { + try { + fs.delete(path, recursive); + } catch (Throwable e) { + LOG.warn("When deleting {}", path, e); + } + } + } + + public interface ImplementationFunction { + void apply() throws Exception; + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatibility.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatibility.java new file mode 100644 index 0000000000000..5e4942566ec5f --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatibility.java @@ -0,0 +1,241 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
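These helpers are the shared plumbing of the API cases: createFile writes a random or fixed payload, readFileBuffer reads a whole file back, and checkImplementation wraps NoSuchMethodError as UnsupportedOperationException unless HdfsCompatApiScope.skipNoSuchMethodError is set. Typical use inside a case (fs assumed bound to the target FileSystem):

    Path p = new Path("/tmp/compat-demo");
    HdfsCompatUtil.createFile(fs, p, 4096);              // 4 KiB of random bytes
    byte[] data = HdfsCompatUtil.readFileBuffer(fs, p);  // read it back
    assert data.length == 4096;
    HdfsCompatUtil.checkImplementation(() -> fs.getStoragePolicy(p));
    HdfsCompatUtil.deleteQuietly(fs, p, true);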

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.PrintStream; +import java.security.PrivilegedExceptionAction; +import java.util.Collection; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.shell.CommandFormat; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.VersionInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Tool for triggering a compatibility report + * for a specific FileSystem implementation. + */ +public class HdfsCompatibility extends Configured implements Tool { + private static final Logger LOG = + LoggerFactory.getLogger(HdfsCompatibility.class); + + private static final String DESCRIPTION = "hdfs compatibility" + + " -uri <uri> [-suite <suite>]:\n" + + "\tTrigger a compatibility check process for a specific" + + " Storage System implementation.\n" + + "\tA compatibility report is generated after the process finishes," + + " showing how many interfaces/functions are implemented" + + " and compatible with the HDFS definition.\n" + + "\t-uri is required to determine the Storage System implementation.\n" + + "\t-suite is optional for limiting the check to a subset." + + " For example, 'shell' means command line only."; + + private final PrintStream out; // Stream for printing command output + private final PrintStream err; // Stream for printing error + private String uri = null; + private String suite = null; + private String output = null; + + HdfsCompatibility(Configuration conf) { + this(conf, System.out, System.err); + } + + HdfsCompatibility(Configuration conf, PrintStream out, PrintStream err) { + super(conf); + this.out = out; + this.err = err; + } + + @Override + public int run(final String[] args) throws Exception { + try { + return UserGroupInformation.getCurrentUser().doAs( + new PrivilegedExceptionAction<Integer>() { + @Override + public Integer run() { + return runImpl(args); + } + }); + } catch (InterruptedException e) { + throw new IOException(e); + } + } + + /** + * Main method that runs the tool for given arguments.
+ * + * @param args arguments + * @return return status of the command + */ + private int runImpl(String[] args) { + if (isHelp(args)) { + printUsage(); + return 0; + } + try { + parseArgs(args); + return doRun(); + } catch (Exception e) { + printError(e.getMessage()); + return -1; + } + } + + private int doRun() throws Exception { + HdfsCompatCommand cmd = new HdfsCompatCommand(uri, suite, getConf()); + cmd.initialize(); + HdfsCompatReport report = cmd.apply(); + OutputStream out = null; + try { + if (this.output != null) { + out = new FileOutputStream(new File(this.output)); + } + } catch (Exception e) { + LOG.error("Create output file failed", e); + out = null; + } + try { + printReport(report, out); + } finally { + IOUtils.closeStream(out); + } + return 0; + } + + private boolean isHelp(String[] args) { + return (args == null) || (args.length == 0) || ( + (args.length == 1) && ( + args[0].equalsIgnoreCase("-h") || + args[0].equalsIgnoreCase("--help")) + ); + } + + private void parseArgs(String[] args) { + CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE); + cf.addOptionWithValue("uri"); + cf.addOptionWithValue("suite"); + cf.addOptionWithValue("output"); + cf.parse(args, 0); + this.uri = cf.getOptValue("uri"); + this.suite = cf.getOptValue("suite"); + this.output = cf.getOptValue("output"); + if (isEmpty(this.uri)) { + throw new HdfsCompatIllegalArgumentException("-uri is not specified."); + } + if (isEmpty(this.suite)) { + this.suite = "ALL"; + } + } + + private boolean isEmpty(final String value) { + return (value == null) || value.isEmpty(); + } + + void printError(String message) { + err.println(message); + } + + void printOut(String message) { + out.println(message); + } + + void printReport(HdfsCompatReport report, OutputStream out) + throws IOException { + StringBuilder buffer = new StringBuilder(); + { // Line 1: + buffer.append("Hadoop Compatibility Report for "); + buffer.append(report.getSuite().getSuiteName()); + buffer.append(":\n"); + } + { // Line 2: + long passed = report.getPassedCase().size(); + long failed = report.getFailedCase().size(); + String percent = (failed == 0) ? 
"100" : String.format("%.2f", + ((double) passed) / ((double) (passed + failed)) * 100); + buffer.append("\t"); + buffer.append(percent); + buffer.append("%, PASSED "); + buffer.append(passed); + buffer.append(" OVER "); + buffer.append(passed + failed); + buffer.append("\n"); + } + { // Line 3: + buffer.append("\tURI: "); + buffer.append(report.getUri()); + if (report.getSuite() != null) { + buffer.append(" (suite: "); + buffer.append(report.getSuite().getClass().getName()); + buffer.append(")"); + } + buffer.append("\n"); + } + { // Line 4: + buffer.append("\tHadoop Version as Baseline: "); + buffer.append(VersionInfo.getVersion()); + } + final String shortMessage = buffer.toString(); + printOut(shortMessage); + + if (out != null) { + out.write(shortMessage.getBytes()); + BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out)); + writer.newLine(); + writer.write("PASSED CASES:"); + writer.newLine(); + Collection cases = report.getPassedCase(); + for (String c : cases) { + writer.write('\t'); + writer.write(c); + writer.newLine(); + writer.flush(); + } + writer.write("FAILED CASES:"); + writer.newLine(); + cases = report.getFailedCase(); + for (String c : cases) { + writer.write('\t'); + writer.write(c); + writer.newLine(); + writer.flush(); + } + writer.flush(); + } + } + + private void printUsage() { + printError(DESCRIPTION); + } + + public static void main(String[] args) throws Exception { + int res = ToolRunner.run(new HdfsCompatibility(new Configuration()), args); + System.exit(res); + } +} diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatAcl.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatAcl.java new file mode 100644 index 0000000000000..2463d5c4131e0 --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatAcl.java @@ -0,0 +1,120 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat.cases.function; + + +import org.apache.hadoop.compat.*; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; +import org.junit.Assert; + +import java.io.IOException; +import java.util.List; + +@HdfsCompatCaseGroup(name = "ACL") +public class HdfsCompatAcl extends AbstractHdfsCompatCase { + private static final String initFileAcl = + "user::rwx,group::rwx,other::rwx,user:foo:rwx"; + private static final String initDirAcl = + "default:user::rwx,default:group::rwx,default:other::rwx"; + private Path dir; + private Path file; + + @HdfsCompatCasePrepare + public void prepare() throws IOException { + this.dir = makePath("dir"); + this.file = new Path(this.dir, "file"); + HdfsCompatUtil.createFile(fs(), this.file, 0); + List entries = AclEntry.parseAclSpec(initDirAcl, true); + fs().setAcl(dir, entries); + entries = AclEntry.parseAclSpec(initFileAcl, true); + fs().setAcl(file, entries); + } + + @HdfsCompatCaseCleanup + public void cleanup() throws IOException { + HdfsCompatUtil.deleteQuietly(fs(), this.dir, true); + } + + @HdfsCompatCase + public void modifyAclEntries() throws IOException { + List entries = AclEntry.parseAclSpec("user:foo:---", true); + fs().modifyAclEntries(file, entries); + List acls = fs().getAclStatus(file).getEntries(); + long count = 0; + for (AclEntry acl : acls) { + if ("foo".equals(acl.getName())) { + ++count; + Assert.assertEquals(FsAction.NONE, acl.getPermission()); + } + } + Assert.assertEquals(1, count); + } + + @HdfsCompatCase + public void removeAclEntries() throws IOException { + List entries = AclEntry.parseAclSpec("user:bar:---", true); + fs().modifyAclEntries(file, entries); + entries = AclEntry.parseAclSpec("user:foo:---", true); + fs().removeAclEntries(file, entries); + List acls = fs().getAclStatus(file).getEntries(); + Assert.assertTrue(acls.stream().noneMatch(e -> "foo".equals(e.getName()))); + Assert.assertTrue(acls.stream().anyMatch(e -> "bar".equals(e.getName()))); + } + + @HdfsCompatCase + public void removeDefaultAcl() throws IOException { + fs().removeDefaultAcl(dir); + List acls = fs().getAclStatus(dir).getEntries(); + Assert.assertTrue(acls.stream().noneMatch( + e -> (e.getScope() == AclEntryScope.DEFAULT))); + } + + @HdfsCompatCase + public void removeAcl() throws IOException { + fs().removeAcl(file); + List acls = fs().getAclStatus(file).getEntries(); + Assert.assertTrue(acls.stream().noneMatch(e -> "foo".equals(e.getName()))); + } + + @HdfsCompatCase + public void setAcl() throws IOException { + List acls = fs().getAclStatus(file).getEntries(); + Assert.assertTrue(acls.stream().anyMatch(e -> "foo".equals(e.getName()))); + } + + @HdfsCompatCase + public void getAclStatus() throws IOException { + AclStatus status = fs().getAclStatus(dir); + Assert.assertFalse(status.getOwner().isEmpty()); + Assert.assertFalse(status.getGroup().isEmpty()); + List acls = status.getEntries(); + Assert.assertTrue(acls.stream().anyMatch(e -> + e.getScope() == AclEntryScope.DEFAULT)); + + status = fs().getAclStatus(file); + 
Assert.assertFalse(status.getOwner().isEmpty()); + Assert.assertFalse(status.getGroup().isEmpty()); + acls = status.getEntries(); + Assert.assertTrue(acls.stream().anyMatch(e -> + e.getScope() == AclEntryScope.ACCESS)); + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatCreate.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatCreate.java new file mode 100644 index 0000000000000..26863a8d5fe12 --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatCreate.java @@ -0,0 +1,152 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
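The annotations carry the whole case lifecycle: @HdfsCompatCasePrepare and @HdfsCompatCaseCleanup bracket every @HdfsCompatCase, while @HdfsCompatCaseSetUp/@HdfsCompatCaseTearDown run once per group. A minimal hypothetical group in the same shape as the ACL cases above (imports: java.io.IOException, org.apache.hadoop.compat.*, org.apache.hadoop.fs.Path, org.junit.Assert):

    @HdfsCompatCaseGroup(name = "Demo")
    public class HdfsCompatDemo extends AbstractHdfsCompatCase {
      private Path file;

      @HdfsCompatCasePrepare
      public void prepare() throws IOException {
        this.file = makePath("demo");
        HdfsCompatUtil.createFile(fs(), this.file, 16);  // 16-byte file
      }

      @HdfsCompatCase
      public void exists() throws IOException {
        Assert.assertTrue(fs().exists(file));
      }

      @HdfsCompatCaseCleanup
      public void cleanup() {
        HdfsCompatUtil.deleteQuietly(fs(), this.file, true);
      }
    }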

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat.cases.function; + +import org.apache.hadoop.compat.*; +import org.apache.hadoop.fs.*; +import org.apache.hadoop.io.IOUtils; +import org.junit.Assert; + +import java.io.IOException; +import java.util.concurrent.CompletableFuture; + +@HdfsCompatCaseGroup(name = "Create") +public class HdfsCompatCreate extends AbstractHdfsCompatCase { + private Path path; + + @HdfsCompatCasePrepare + public void prepare() { + this.path = makePath("path"); + } + + @HdfsCompatCaseCleanup + public void cleanup() { + HdfsCompatUtil.deleteQuietly(fs(), this.path, true); + } + + @HdfsCompatCase + public void mkdirs() throws IOException { + fs().mkdirs(path); + Assert.assertTrue(fs().exists(path)); + } + + @HdfsCompatCase + public void create() throws IOException { + FSDataOutputStream out = null; + try { + out = fs().create(path, true); + Assert.assertTrue(fs().exists(path)); + } finally { + IOUtils.closeStream(out); + } + } + + @HdfsCompatCase + public void createNonRecursive() { + Path file = new Path(path, "file-no-parent"); + try { + fs().createNonRecursive(file, true, 1024, (short) 1, 1048576, null); + Assert.fail("Should fail since parent does not exist"); + } catch (IOException ignored) { + } + } + + @HdfsCompatCase + public void createNewFile() throws IOException { + HdfsCompatUtil.createFile(fs(), path, 0); + Assert.assertFalse(fs().createNewFile(path)); + } + + @HdfsCompatCase + public void append() throws IOException { + HdfsCompatUtil.createFile(fs(), path, 128); + FSDataOutputStream out = null; + byte[] data = new byte[64]; + try { + out = fs().append(path); + out.write(data); + out.close(); + out = null; + FileStatus fileStatus = fs().getFileStatus(path); + Assert.assertEquals(128 + 64, fileStatus.getLen()); + } finally { + IOUtils.closeStream(out); + } + } + + @HdfsCompatCase + public void createFile() throws IOException { + FSDataOutputStream out = null; + fs().mkdirs(path); + final Path file = new Path(path, "file"); + try { + FSDataOutputStreamBuilder builder = fs().createFile(file); + out = builder.blockSize(1048576 * 2).build(); + out.write("Hello World!".getBytes()); + out.close(); + out = null; + Assert.assertTrue(fs().exists(file)); + } finally { + IOUtils.closeStream(out); + } + } + + @HdfsCompatCase + public void appendFile() throws IOException { + HdfsCompatUtil.createFile(fs(), path, 128); + FSDataOutputStream out = null; + byte[] data = new byte[64]; + try { + FSDataOutputStreamBuilder builder = fs().appendFile(path); + out = builder.build(); + out.write(data); + out.close(); + out = null; + FileStatus fileStatus = fs().getFileStatus(path); + Assert.assertEquals(128 + 64, fileStatus.getLen()); + } finally { + IOUtils.closeStream(out); + } + } + + @HdfsCompatCase + public void createMultipartUploader() throws Exception { + MultipartUploader mpu = null; + UploadHandle handle = null; + try { + MultipartUploaderBuilder builder = fs().createMultipartUploader(path); + final Path file = fs().makeQualified(new Path(path, "file")); + mpu = builder.blockSize(1048576).build(); + CompletableFuture future = mpu.startUpload(file); + handle = future.get(); + } finally { + if (mpu != null) { + if (handle != null) { + try { + mpu.abort(handle, 
path); + } catch (Throwable ignored) { + } + } + try { + mpu.abortUploadsUnderPath(path); + } catch (Throwable ignored) { + } + } + } + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatDirectory.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatDirectory.java new file mode 100644 index 0000000000000..bb9a9d0b9291e --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatDirectory.java @@ -0,0 +1,145 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
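Most of these cases pair a nulled-out stream with IOUtils.closeStream in a finally block so a failed close cannot mask an assertion. When no assertion sits between the write and the close, try-with-resources is the shorter equivalent; a sketch with the same 128+64 layout as the append() case:

    HdfsCompatUtil.createFile(fs(), path, 128);
    try (FSDataOutputStream out = fs().append(path)) {
      out.write(new byte[64]);
    }
    Assert.assertEquals(128 + 64, fs().getFileStatus(path).getLen());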

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat.cases.function; + +import org.apache.hadoop.compat.*; +import org.apache.hadoop.fs.*; +import org.junit.Assert; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +@HdfsCompatCaseGroup(name = "Directory") +public class HdfsCompatDirectory extends AbstractHdfsCompatCase { + private static final int fileLen = 128; + private Path dir = null; + private Path file = null; + + @HdfsCompatCasePrepare + public void prepare() throws IOException { + this.dir = makePath("dir"); + this.file = new Path(this.dir, "file"); + HdfsCompatUtil.createFile(fs(), file, fileLen); + } + + @HdfsCompatCaseCleanup + public void cleanup() throws IOException { + HdfsCompatUtil.deleteQuietly(fs(), this.dir, true); + } + + @HdfsCompatCase + public void isDirectory() throws IOException { + Assert.assertTrue(fs().isDirectory(dir)); + } + + @HdfsCompatCase + public void listStatus() throws IOException { + FileStatus[] files = fs().listStatus(dir); + Assert.assertNotNull(files); + Assert.assertEquals(1, files.length); + Assert.assertEquals(file.getName(), files[0].getPath().getName()); + } + + @HdfsCompatCase + public void globStatus() throws IOException { + FileStatus[] files = fs().globStatus(new Path(dir, "*ile")); + Assert.assertNotNull(files); + Assert.assertEquals(1, files.length); + Assert.assertEquals(file.getName(), files[0].getPath().getName()); + } + + @HdfsCompatCase + public void listLocatedStatus() throws IOException { + RemoteIterator locatedFileStatuses = + fs().listLocatedStatus(dir); + Assert.assertNotNull(locatedFileStatuses); + List files = new ArrayList<>(); + while (locatedFileStatuses.hasNext()) { + files.add(locatedFileStatuses.next()); + } + Assert.assertEquals(1, files.size()); + LocatedFileStatus fileStatus = files.get(0); + Assert.assertEquals(file.getName(), fileStatus.getPath().getName()); + } + + @HdfsCompatCase + public void listStatusIterator() throws IOException { + RemoteIterator fileStatuses = fs().listStatusIterator(dir); + Assert.assertNotNull(fileStatuses); + List files = new ArrayList<>(); + while (fileStatuses.hasNext()) { + files.add(fileStatuses.next()); + } + Assert.assertEquals(1, files.size()); + FileStatus fileStatus = files.get(0); + Assert.assertEquals(file.getName(), fileStatus.getPath().getName()); + } + + @HdfsCompatCase + public void listFiles() throws IOException { + RemoteIterator iter = fs().listFiles(dir, true); + Assert.assertNotNull(iter); + List files = new ArrayList<>(); + while (iter.hasNext()) { + files.add(iter.next()); + } + Assert.assertEquals(1, files.size()); + } + + @HdfsCompatCase + public void listCorruptFileBlocks() throws IOException { + RemoteIterator iter = fs().listCorruptFileBlocks(dir); + Assert.assertNotNull(iter); + Assert.assertFalse(iter.hasNext()); // No corrupted file + } + + @HdfsCompatCase + public void getContentSummary() throws IOException { + ContentSummary summary = fs().getContentSummary(dir); + Assert.assertEquals(1, summary.getFileCount()); + Assert.assertEquals(1, summary.getDirectoryCount()); + Assert.assertEquals(fileLen, summary.getLength()); + } + + @HdfsCompatCase + public void getUsed() throws IOException { + long used = 
fs().getUsed(dir); + Assert.assertTrue(used >= fileLen); + } + + @HdfsCompatCase + public void getQuotaUsage() throws IOException { + QuotaUsage usage = fs().getQuotaUsage(dir); + Assert.assertEquals(2, usage.getFileAndDirectoryCount()); + } + + @HdfsCompatCase + public void setQuota() throws IOException { + fs().setQuota(dir, 1048576L, 1073741824L); + QuotaUsage usage = fs().getQuotaUsage(dir); + Assert.assertEquals(1048576L, usage.getQuota()); + } + + @HdfsCompatCase + public void setQuotaByStorageType() throws IOException { + fs().setQuotaByStorageType(dir, StorageType.DISK, 1048576L); + QuotaUsage usage = fs().getQuotaUsage(dir); + Assert.assertEquals(1048576L, usage.getTypeQuota(StorageType.DISK)); + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatFile.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatFile.java new file mode 100644 index 0000000000000..b4e4d3814533a --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatFile.java @@ -0,0 +1,240 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
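The listing cases each drain a RemoteIterator by hand; a small generic helper (a sketch, not part of the patch; imports: java.io.IOException, java.util.*, org.apache.hadoop.fs.RemoteIterator) captures that pattern once:

    static <T> List<T> drain(RemoteIterator<T> iter) throws IOException {
      List<T> all = new ArrayList<>();
      while (iter.hasNext()) {
        all.add(iter.next());
      }
      return all;
    }
    // e.g. List<LocatedFileStatus> files = drain(fs().listFiles(dir, true));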

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat.cases.function; + +import org.apache.hadoop.compat.*; +import org.apache.hadoop.fs.*; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.DataChecksum; +import org.junit.Assert; + +import java.io.IOException; +import java.security.PrivilegedExceptionAction; +import java.util.Random; + +@HdfsCompatCaseGroup(name = "File") +public class HdfsCompatFile extends AbstractHdfsCompatCase { + private static final int fileLen = 128; + private static final long blockSize = 1048576; + private static final short replication = 1; + private Path file = null; + + @HdfsCompatCasePrepare + public void prepare() throws IOException { + this.file = makePath("file"); + HdfsCompatUtil.createFile(fs(), this.file, true, + 1024, fileLen, blockSize, replication); + } + + @HdfsCompatCaseCleanup + public void cleanup() throws IOException { + HdfsCompatUtil.deleteQuietly(fs(), this.file, true); + } + + @HdfsCompatCase + public void getFileStatus() throws IOException { + FileStatus fileStatus = fs().getFileStatus(file); + Assert.assertNotNull(fileStatus); + Assert.assertEquals(file.getName(), fileStatus.getPath().getName()); + } + + @HdfsCompatCase + public void exists() throws IOException { + Assert.assertTrue(fs().exists(file)); + } + + @HdfsCompatCase + public void isFile() throws IOException { + Assert.assertTrue(fs().isFile(file)); + } + + @HdfsCompatCase + public void getLength() throws IOException { + Assert.assertEquals(fileLen, fs().getLength(file)); + } + + @HdfsCompatCase(brief = "arbitrary blockSize") + public void getBlockSize() throws IOException { + Assert.assertEquals(blockSize, fs().getBlockSize(file)); + } + + @HdfsCompatCase + public void renameFile() throws IOException { + Path dst = new Path(file.toString() + "_rename_dst"); + fs().rename(file, dst); + Assert.assertFalse(fs().exists(file)); + Assert.assertTrue(fs().exists(dst)); + } + + @HdfsCompatCase + public void deleteFile() throws IOException { + fs().delete(file, true); + Assert.assertFalse(fs().exists(file)); + } + + @HdfsCompatCase + public void deleteOnExit() throws IOException { + FileSystem newFs = FileSystem.newInstance(fs().getUri(), fs().getConf()); + newFs.deleteOnExit(file); + newFs.close(); + Assert.assertFalse(fs().exists(file)); + } + + @HdfsCompatCase + public void cancelDeleteOnExit() throws IOException { + FileSystem newFs = FileSystem.newInstance(fs().getUri(), fs().getConf()); + newFs.deleteOnExit(file); + newFs.cancelDeleteOnExit(file); + newFs.close(); + Assert.assertTrue(fs().exists(file)); + } + + @HdfsCompatCase + public void truncate() throws IOException, InterruptedException { + int newLen = new Random().nextInt(fileLen); + boolean finished = fs().truncate(file, newLen); + while (!finished) { + Thread.sleep(1000); + finished = fs().truncate(file, newLen); + } + FileStatus fileStatus = fs().getFileStatus(file); + Assert.assertEquals(newLen, fileStatus.getLen()); + } + + @HdfsCompatCase + public void setOwner() throws Exception { + final String owner = "test_" + new 
Random().nextInt(1024); + final String group = "test_" + new Random().nextInt(1024); + final String privileged = getPrivilegedUser(); + UserGroupInformation.createRemoteUser(privileged).doAs( + (PrivilegedExceptionAction) () -> { + FileSystem.newInstance(fs().getUri(), fs().getConf()) + .setOwner(file, owner, group); + return null; + } + ); + FileStatus fileStatus = fs().getFileStatus(file); + Assert.assertEquals(owner, fileStatus.getOwner()); + Assert.assertEquals(group, fileStatus.getGroup()); + } + + @HdfsCompatCase + public void setTimes() throws IOException { + final long atime = System.currentTimeMillis(); + final long mtime = atime - 1000; + fs().setTimes(file, mtime, atime); + FileStatus fileStatus = fs().getFileStatus(file); + Assert.assertEquals(mtime, fileStatus.getModificationTime()); + Assert.assertEquals(atime, fileStatus.getAccessTime()); + } + + @HdfsCompatCase + public void concat() throws IOException { + final Path dir = makePath("dir"); + try { + final Path src = new Path(dir, "src"); + final Path dst = new Path(dir, "dst"); + HdfsCompatUtil.createFile(fs(), src, 64); + HdfsCompatUtil.createFile(fs(), dst, 16); + fs().concat(dst, new Path[]{src}); + FileStatus fileStatus = fs().getFileStatus(dst); + Assert.assertEquals(16 + 64, fileStatus.getLen()); + } finally { + HdfsCompatUtil.deleteQuietly(fs(), dir, true); + } + } + + @HdfsCompatCase + public void getFileChecksum() throws IOException { + FileChecksum checksum = fs().getFileChecksum(file); + Assert.assertNotNull(checksum); + Assert.assertNotNull(checksum.getChecksumOpt()); + DataChecksum.Type type = checksum.getChecksumOpt().getChecksumType(); + Assert.assertNotEquals(DataChecksum.Type.NULL, type); + } + + @HdfsCompatCase + public void getFileBlockLocations() throws IOException { + BlockLocation[] locations = fs().getFileBlockLocations(file, 0, fileLen); + Assert.assertTrue(locations.length >= 1); + BlockLocation location = locations[0]; + Assert.assertTrue(location.getLength() > 0); + } + + @HdfsCompatCase + public void getReplication() throws IOException { + Assert.assertEquals(replication, fs().getReplication(file)); + } + + @HdfsCompatCase(brief = "arbitrary replication") + public void setReplication() throws IOException { + fs().setReplication(this.file, (short) 2); + Assert.assertEquals(2, fs().getReplication(this.file)); + } + + @HdfsCompatCase + public void getPathHandle() throws IOException { + FileStatus status = fs().getFileStatus(file); + PathHandle handle = fs().getPathHandle(status, Options.HandleOpt.path()); + final int maxReadLen = Math.min(fileLen, 4096); + byte[] data = new byte[maxReadLen]; + try (FSDataInputStream in = fs().open(handle, 1024)) { + in.readFully(data); + } + } + + @HdfsCompatCase + public void open() throws IOException { + FSDataInputStream in = null; + try { + in = fs().open(file); + in.read(); + } finally { + IOUtils.closeStream(in); + } + } + + @HdfsCompatCase + public void openFile() throws Exception { + FSDataInputStream in = null; + try { + FutureDataInputStreamBuilder builder = fs().openFile(file); + in = builder.build().get(); + } finally { + IOUtils.closeStream(in); + } + } + + @HdfsCompatCase + public void access() throws IOException { + fs().access(file, FsAction.READ); + } + + @HdfsCompatCase + public void setPermission() throws IOException { + fs().setPermission(file, FsPermission.createImmutable((short) 511)); + try { + fs().access(file, FsAction.ALL); + Assert.fail("Should not have write permission"); + } catch (Throwable ignored) { + } + } +} \ No newline at end of 
file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatLocal.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatLocal.java new file mode 100644 index 0000000000000..8ff57e2a30a23 --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatLocal.java @@ -0,0 +1,111 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
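A note on the truncate() case above: FileSystem.truncate returns false when the operation completes asynchronously (the tail block still needs recovery), hence the poll loop before asserting the new length. Reading the shortened file back afterwards is a natural cross-check (sketch reusing the case's file and fs()):

    try (FSDataInputStream in = fs().open(file)) {
      byte[] buf = new byte[(int) fs().getFileStatus(file).getLen()];
      in.readFully(0, buf);  // positional read of the truncated content
    }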

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat.cases.function; + + +import org.apache.hadoop.compat.*; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.junit.Assert; + +import java.io.IOException; +import java.util.Random; + +@HdfsCompatCaseGroup(name = "Local") +public class HdfsCompatLocal extends AbstractHdfsCompatCase { + private static final int fileLen = 128; + private LocalFileSystem localFs; + private Path localBasePath; + private Path localSrc; + private Path localDst; + private Path src; + private Path dst; + + @HdfsCompatCaseSetUp + public void setUp() throws IOException { + localFs = FileSystem.getLocal(fs().getConf()); + localBasePath = localFs.makeQualified(getLocalPath()); + } + + @HdfsCompatCaseTearDown + public void tearDown() { + HdfsCompatUtil.deleteQuietly(localFs, localBasePath, true); + } + + @HdfsCompatCasePrepare + public void prepare() throws IOException { + final String unique = System.currentTimeMillis() + + "_" + new Random().nextLong() + "/"; + this.localSrc = new Path(localBasePath, unique + "src"); + this.localDst = new Path(localBasePath, unique + "dst"); + this.src = new Path(getBasePath(), unique + "src"); + this.dst = new Path(getBasePath(), unique + "dst"); + HdfsCompatUtil.createFile(localFs, this.localSrc, fileLen); + HdfsCompatUtil.createFile(fs(), this.src, fileLen); + } + + @HdfsCompatCaseCleanup + public void cleanup() { + HdfsCompatUtil.deleteQuietly(fs(), this.src.getParent(), true); + HdfsCompatUtil.deleteQuietly(localFs, this.localSrc.getParent(), true); + } + + @HdfsCompatCase + public void copyFromLocalFile() throws IOException { + fs().copyFromLocalFile(localSrc, dst); + Assert.assertTrue(localFs.exists(localSrc)); + Assert.assertTrue(fs().exists(dst)); + } + + @HdfsCompatCase + public void moveFromLocalFile() throws IOException { + fs().moveFromLocalFile(localSrc, dst); + Assert.assertFalse(localFs.exists(localSrc)); + Assert.assertTrue(fs().exists(dst)); + } + + @HdfsCompatCase + public void copyToLocalFile() throws IOException { + fs().copyToLocalFile(src, localDst); + Assert.assertTrue(fs().exists(src)); + Assert.assertTrue(localFs.exists(localDst)); + } + + @HdfsCompatCase + public void moveToLocalFile() throws IOException { + fs().moveToLocalFile(src, localDst); + Assert.assertFalse(fs().exists(src)); + Assert.assertTrue(localFs.exists(localDst)); + } + + @HdfsCompatCase + public void startLocalOutput() throws IOException { + Path local = fs().startLocalOutput(dst, localDst); + HdfsCompatUtil.createFile(localFs, local, 16); + Assert.assertTrue(localFs.exists(local)); + } + + @HdfsCompatCase + public void completeLocalOutput() throws IOException { + Path local = fs().startLocalOutput(dst, localDst); + HdfsCompatUtil.createFile(localFs, local, 16); + fs().completeLocalOutput(dst, localDst); + Assert.assertTrue(fs().exists(dst)); + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatServer.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatServer.java new file mode 100644 index 0000000000000..ccd60f23eaf73 --- /dev/null +++ 
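The copy/move pairs in the Local group differ only in whether the source survives; the richer copyFromLocalFile overloads make both knobs explicit (sketch using the case's localSrc/dst fields):

    fs().copyFromLocalFile(false, true, localSrc, dst);  // delSrc=false, overwrite=true
    fs().copyFromLocalFile(true, localSrc, dst);         // delSrc=true, like moveFromLocalFile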
b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatServer.java @@ -0,0 +1,227 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
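For orientation, the copy/move pairs in HdfsCompatLocal above differ only in whether the source file survives the operation. A standalone sketch of the copy direction, assuming a default Configuration and placeholder paths (neither is part of the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class LocalCopySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);           // store under test
    LocalFileSystem local = FileSystem.getLocal(conf);
    Path localSrc = new Path("/tmp/demo-src");      // placeholder paths
    Path remoteDst = new Path("/tmp/demo-dst");
    local.create(localSrc, true).close();
    fs.copyFromLocalFile(localSrc, remoteDst);      // copy keeps the source
    System.out.println(local.exists(localSrc) + " " + fs.exists(remoteDst));
  }
}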
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat.cases.function; + + +import org.apache.hadoop.compat.AbstractHdfsCompatCase; +import org.apache.hadoop.compat.HdfsCompatCase; +import org.apache.hadoop.compat.HdfsCompatCaseGroup; +import org.apache.hadoop.compat.HdfsCompatUtil; +import org.apache.hadoop.fs.*; +import org.junit.Assert; + +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.net.URI; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +@HdfsCompatCaseGroup(name = "Server") +public class HdfsCompatServer extends AbstractHdfsCompatCase { + private void isValid(String name) { + Assert.assertNotNull(name); + Assert.assertFalse(name.isEmpty()); + } + + @HdfsCompatCase + public void initialize() throws Exception { + Class cls = FileSystem.getFileSystemClass( + getBasePath().toUri().getScheme(), fs().getConf()); + Constructor ctor = + cls.getDeclaredConstructor(); + ctor.setAccessible(true); + FileSystem newFs = ctor.newInstance(); + newFs.initialize(fs().getUri(), fs().getConf()); + } + + @HdfsCompatCase + public void getScheme() { + final String scheme = fs().getScheme(); + isValid(scheme); + } + + @HdfsCompatCase + public void getUri() { + URI uri = fs().getUri(); + isValid(uri.getScheme()); + } + + @HdfsCompatCase + public void getCanonicalServiceName() { + final String serviceName = fs().getCanonicalServiceName(); + isValid(serviceName); + } + + @HdfsCompatCase + public void getName() { + final String name = fs().getName(); + isValid(name); + } + + @HdfsCompatCase + public void makeQualified() { + Path path = fs().makeQualified(makePath("file")); + isValid(path.toUri().getScheme()); + } + + @HdfsCompatCase + public void getChildFileSystems() { + fs().getChildFileSystems(); + } + + @HdfsCompatCase + public void resolvePath() throws IOException { + FileSystem.enableSymlinks(); + Path file = makePath("file"); + Path link = new Path(file.toString() + "_link"); + HdfsCompatUtil.createFile(fs(), file, 0); + fs().createSymlink(file, link, true); + Path resolved = fs().resolvePath(link); + Assert.assertEquals(file.getName(), resolved.getName()); + } + + @HdfsCompatCase + public void getHomeDirectory() { + final Path home = fs().getHomeDirectory(); + isValid(home.toString()); + } + + @HdfsCompatCase + public void setWorkingDirectory() throws IOException { + FileSystem another = FileSystem.newInstance(fs().getUri(), fs().getConf()); + Path work = makePath("work"); + another.setWorkingDirectory(work); + Assert.assertEquals(work.getName(), + another.getWorkingDirectory().getName()); + } + + @HdfsCompatCase + public void getWorkingDirectory() { + Path work = fs().getWorkingDirectory(); + isValid(work.toString()); + } + + @HdfsCompatCase + public void close() throws IOException { + FileSystem another = FileSystem.newInstance(fs().getUri(), fs().getConf()); + another.close(); + } + + @HdfsCompatCase + public void getDefaultBlockSize() { + Assert.assertTrue(fs().getDefaultBlockSize(getBasePath()) >= 0); + } + + @HdfsCompatCase + public void getDefaultReplication() { + Assert.assertTrue(fs().getDefaultReplication(getBasePath()) >= 0); + } + + @HdfsCompatCase + public void getStorageStatistics() { + 
Assert.assertNotNull(fs().getStorageStatistics()); + } + + // @HdfsCompatCase + public void setVerifyChecksum() { + } + + // @HdfsCompatCase + public void setWriteChecksum() { + } + + @HdfsCompatCase + public void getDelegationToken() throws IOException { + Assert.assertNotNull(fs().getDelegationToken(getDelegationTokenRenewer())); + } + + @HdfsCompatCase + public void getAdditionalTokenIssuers() throws IOException { + Assert.assertNotNull(fs().getAdditionalTokenIssuers()); + } + + @HdfsCompatCase + public void getServerDefaults() throws IOException { + FsServerDefaults d = fs().getServerDefaults(getBasePath()); + Assert.assertTrue(d.getBlockSize() >= 0); + } + + @HdfsCompatCase + public void msync() throws IOException { + fs().msync(); + } + + @HdfsCompatCase + public void getStatus() throws IOException { + FsStatus status = fs().getStatus(); + Assert.assertTrue(status.getRemaining() > 0); + } + + @HdfsCompatCase + public void getTrashRoot() { + Path trash = fs().getTrashRoot(makePath("file")); + isValid(trash.toString()); + } + + @HdfsCompatCase + public void getTrashRoots() { + Collection trashes = fs().getTrashRoots(true); + Assert.assertNotNull(trashes); + for (FileStatus trash : trashes) { + isValid(trash.getPath().toString()); + } + } + + @HdfsCompatCase + public void getAllStoragePolicies() throws IOException { + Collection policies = + fs().getAllStoragePolicies(); + Assert.assertFalse(policies.isEmpty()); + } + + @HdfsCompatCase + public void supportsSymlinks() { + Assert.assertTrue(fs().supportsSymlinks()); + } + + @HdfsCompatCase + public void hasPathCapability() throws IOException { + List allCaps = new ArrayList<>(); + allCaps.add(CommonPathCapabilities.FS_ACLS); + allCaps.add(CommonPathCapabilities.FS_APPEND); + allCaps.add(CommonPathCapabilities.FS_CHECKSUMS); + allCaps.add(CommonPathCapabilities.FS_CONCAT); + allCaps.add(CommonPathCapabilities.FS_LIST_CORRUPT_FILE_BLOCKS); + allCaps.add(CommonPathCapabilities.FS_PATHHANDLES); + allCaps.add(CommonPathCapabilities.FS_PERMISSIONS); + allCaps.add(CommonPathCapabilities.FS_READ_ONLY_CONNECTOR); + allCaps.add(CommonPathCapabilities.FS_SNAPSHOTS); + allCaps.add(CommonPathCapabilities.FS_STORAGEPOLICY); + allCaps.add(CommonPathCapabilities.FS_SYMLINKS); + allCaps.add(CommonPathCapabilities.FS_TRUNCATE); + allCaps.add(CommonPathCapabilities.FS_XATTRS); + final Path base = getBasePath(); + for (String cap : allCaps) { + if (fs().hasPathCapability(base, cap)) { + return; + } + } + throw new IOException("Cannot find any path capability"); + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatSnapshot.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatSnapshot.java new file mode 100644 index 0000000000000..73d28b7f2bdde --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatSnapshot.java @@ -0,0 +1,138 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
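The hasPathCapability case above relies on the PathCapabilities contract: a store that does not recognize a capability name is expected to return false rather than throw. A minimal probe sketch (the target path and FileSystem are placeholders, not from the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CapabilityProbe {
  public static void main(String[] args) throws Exception {
    Path base = new Path("hdfs:///tmp");  // hypothetical target
    FileSystem fs = base.getFileSystem(new Configuration());
    // Unknown or unsupported capabilities answer false; no exception.
    boolean canTruncate =
        fs.hasPathCapability(base, CommonPathCapabilities.FS_TRUNCATE);
    System.out.println("truncate supported: " + canTruncate);
  }
}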
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat.cases.function; + + +import org.apache.hadoop.compat.*; +import org.apache.hadoop.fs.Path; +import org.junit.Assert; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; + +@HdfsCompatCaseGroup(name = "Snapshot") +public class HdfsCompatSnapshot extends AbstractHdfsCompatCase { + private static final Logger LOG = LoggerFactory.getLogger(HdfsCompatSnapshot.class); + private final String snapshotName = "s-name"; + private final String fileName = "file"; + private Path base; + private Path dir; + private Path snapshot; + private Method allow; + private Method disallow; + + private static Path getSnapshotPath(Path path, String snapshotName) { + return new Path(path, ".snapshot/" + snapshotName); + } + + @HdfsCompatCaseSetUp + public void setUp() throws Exception { + this.base = getUniquePath(); + fs().mkdirs(this.base); + try { + Method allowSnapshotMethod = fs().getClass() + .getMethod("allowSnapshot", Path.class); + allowSnapshotMethod.setAccessible(true); + allowSnapshotMethod.invoke(fs(), this.base); + this.allow = allowSnapshotMethod; + + Method disallowSnapshotMethod = fs().getClass() + .getMethod("disallowSnapshot", Path.class); + disallowSnapshotMethod.setAccessible(true); + disallowSnapshotMethod.invoke(fs(), this.base); + this.disallow = disallowSnapshotMethod; + } catch (InvocationTargetException e) { + // Method exists but the invocation throws an exception. 
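+ // Unwrap the reflection wrapper so the case surfaces the underlying
+ // FileSystem failure rather than an InvocationTargetException.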
+ Throwable cause = e.getCause(); + if (cause instanceof Exception) { + throw (Exception) cause; + } else { + throw new RuntimeException(cause); + } + } catch (ReflectiveOperationException e) { + if (this.allow == null) { + LOG.warn("No allowSnapshot method found."); + } + if (this.disallow == null) { + LOG.warn("No disallowSnapshot method found."); + } + } + } + + @HdfsCompatCaseTearDown + public void tearDown() throws ReflectiveOperationException { + try { + if (this.disallow != null) { + disallow.invoke(fs(), this.base); + } + } finally { + HdfsCompatUtil.deleteQuietly(fs(), this.base, true); + } + } + + @HdfsCompatCasePrepare + public void prepare() throws IOException, ReflectiveOperationException { + this.dir = getUniquePath(base); + HdfsCompatUtil.createFile(fs(), new Path(this.dir, this.fileName), 0); + if (this.allow != null) { + allow.invoke(fs(), this.dir); + } + this.snapshot = fs().createSnapshot(this.dir, this.snapshotName); + } + + @HdfsCompatCaseCleanup + public void cleanup() throws ReflectiveOperationException { + try { + try { + fs().deleteSnapshot(this.dir, this.snapshotName); + } catch (IOException ignored) { + } + if (this.disallow != null) { + disallow.invoke(fs(), this.dir); + } + } finally { + HdfsCompatUtil.deleteQuietly(fs(), this.dir, true); + } + } + + @HdfsCompatCase + public void createSnapshot() throws IOException { + Assert.assertNotEquals(snapshot.toString(), dir.toString()); + Assert.assertTrue(fs().exists(snapshot)); + Assert.assertTrue(fs().exists(new Path(snapshot, fileName))); + } + + @HdfsCompatCase + public void renameSnapshot() throws IOException { + fs().renameSnapshot(dir, snapshotName, "s-name2"); + Assert.assertFalse(fs().exists(new Path(snapshot, fileName))); + snapshot = getSnapshotPath(dir, "s-name2"); + Assert.assertTrue(fs().exists(new Path(snapshot, fileName))); + fs().renameSnapshot(dir, "s-name2", snapshotName); + } + + @HdfsCompatCase + public void deleteSnapshot() throws IOException { + fs().deleteSnapshot(dir, snapshotName); + Assert.assertFalse(fs().exists(snapshot)); + Assert.assertFalse(fs().exists(new Path(snapshot, fileName))); + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatStoragePolicy.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatStoragePolicy.java new file mode 100644 index 0000000000000..7f150bef24efe --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatStoragePolicy.java @@ -0,0 +1,106 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
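The reflective allowSnapshot/disallowSnapshot lookup above keeps the bench free of a compile-time hadoop-hdfs dependency. When that dependency is acceptable, the direct equivalent is a plain cast (illustrative helper, not part of the patch):

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class SnapshotPrep {
  // Direct form of the reflective allowSnapshot call; only HDFS-backed
  // FileSystem instances expose the method, so non-HDFS stores are skipped.
  static void allowSnapshot(FileSystem fs, Path dir) throws Exception {
    if (fs instanceof DistributedFileSystem) {
      ((DistributedFileSystem) fs).allowSnapshot(dir);
    }
  }
}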
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat.cases.function; + + +import org.apache.hadoop.compat.*; +import org.apache.hadoop.fs.BlockStoragePolicySpi; +import org.apache.hadoop.fs.Path; +import org.junit.Assert; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +@HdfsCompatCaseGroup(name = "StoragePolicy") +public class HdfsCompatStoragePolicy extends AbstractHdfsCompatCase { + private static final Logger LOG = + LoggerFactory.getLogger(HdfsCompatStoragePolicy.class); + private Path dir; + private Path file; + private String[] policies; + private String defaultPolicyName; + private String policyName; + + @HdfsCompatCaseSetUp + public void setUp() throws IOException { + policies = getStoragePolicyNames(); + } + + @HdfsCompatCasePrepare + public void prepare() throws IOException { + this.dir = makePath("dir"); + this.file = new Path(this.dir, "file"); + HdfsCompatUtil.createFile(fs(), file, 0); + + BlockStoragePolicySpi policy = fs().getStoragePolicy(this.dir); + this.defaultPolicyName = (policy == null) ? null : policy.getName(); + + List differentPolicies = new ArrayList<>(); + for (String name : policies) { + if (!name.equalsIgnoreCase(defaultPolicyName)) { + differentPolicies.add(name); + } + } + if (differentPolicies.isEmpty()) { + LOG.warn("There is only one storage policy: " + + (defaultPolicyName == null ? "null" : defaultPolicyName)); + this.policyName = defaultPolicyName; + } else { + this.policyName = differentPolicies.get( + new Random().nextInt(differentPolicies.size())); + } + } + + @HdfsCompatCaseCleanup + public void cleanup() { + HdfsCompatUtil.deleteQuietly(fs(), this.dir, true); + } + + @HdfsCompatCase + public void setStoragePolicy() throws IOException { + fs().setStoragePolicy(dir, policyName); + BlockStoragePolicySpi policy = fs().getStoragePolicy(dir); + Assert.assertEquals(policyName, policy.getName()); + } + + @HdfsCompatCase + public void unsetStoragePolicy() throws IOException { + fs().setStoragePolicy(dir, policyName); + fs().unsetStoragePolicy(dir); + BlockStoragePolicySpi policy = fs().getStoragePolicy(dir); + String policyName = (policy == null) ? null : policy.getName(); + Assert.assertEquals(defaultPolicyName, policyName); + } + + @HdfsCompatCase(ifDef = "org.apache.hadoop.fs.FileSystem#satisfyStoragePolicy") + public void satisfyStoragePolicy() throws IOException { + fs().setStoragePolicy(dir, policyName); + fs().satisfyStoragePolicy(dir); + } + + @HdfsCompatCase + public void getStoragePolicy() throws IOException { + BlockStoragePolicySpi policy = fs().getStoragePolicy(file); + String policyName = (policy == null) ? 
null : policy.getName(); + Assert.assertEquals(defaultPolicyName, policyName); + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatSymlink.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatSymlink.java new file mode 100644 index 0000000000000..8c26f367171da --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatSymlink.java @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
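The ifDef attribute on satisfyStoragePolicy above suggests the runner gates a case on the presence of the named method. A sketch of the kind of reflective probe that implies; this helper is illustrative only and is not the patch's actual logic:

final class IfDefProbe {
  // Split "fully.qualified.Class#method" and report whether any public
  // method with that name exists on the class.
  static boolean isDefined(String ifDef) {
    int hash = ifDef.indexOf('#');
    if (hash < 0) {
      return false;
    }
    try {
      Class<?> cls = Class.forName(ifDef.substring(0, hash));
      String name = ifDef.substring(hash + 1);
      for (java.lang.reflect.Method m : cls.getMethods()) {
        if (m.getName().equals(name)) {
          return true;
        }
      }
    } catch (ClassNotFoundException ignored) {
      // Class absent: the API cannot be present either.
    }
    return false;
  }
}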
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat.cases.function; + +import org.apache.hadoop.compat.*; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.junit.Assert; + +import java.io.IOException; + +@HdfsCompatCaseGroup(name = "Symlink") +public class HdfsCompatSymlink extends AbstractHdfsCompatCase { + private static final int fileLen = 128; + private Path target = null; + private Path link = null; + + @HdfsCompatCaseSetUp + public void setUp() { + FileSystem.enableSymlinks(); + } + + @HdfsCompatCasePrepare + public void prepare() throws IOException { + this.target = makePath("target"); + this.link = new Path(this.target.getParent(), "link"); + HdfsCompatUtil.createFile(fs(), this.target, fileLen); + fs().createSymlink(this.target, this.link, true); + } + + @HdfsCompatCaseCleanup + public void cleanup() throws IOException { + HdfsCompatUtil.deleteQuietly(fs(), this.link, true); + HdfsCompatUtil.deleteQuietly(fs(), this.target, true); + } + + @HdfsCompatCase + public void createSymlink() throws IOException { + Assert.assertTrue(fs().exists(link)); + } + + @HdfsCompatCase + public void getFileLinkStatus() throws IOException { + FileStatus linkStatus = fs().getFileLinkStatus(link); + Assert.assertTrue(linkStatus.isSymlink()); + Assert.assertEquals(target.getName(), linkStatus.getSymlink().getName()); + } + + @HdfsCompatCase + public void getLinkTarget() throws IOException { + Path src = fs().getLinkTarget(link); + Assert.assertEquals(target.getName(), src.getName()); + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatTpcds.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatTpcds.java new file mode 100644 index 0000000000000..fe1e03a17493f --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatTpcds.java @@ -0,0 +1,120 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
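The setUp above calls FileSystem.enableSymlinks() because symlinks are disabled by default and the HDFS client rejects createSymlink otherwise. A compact sketch of the round trip these cases exercise, with placeholder paths:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SymlinkSketch {
  public static void main(String[] args) throws Exception {
    FileSystem.enableSymlinks();              // static, JVM-wide switch
    FileSystem fs = FileSystem.get(new Configuration());
    Path target = new Path("/tmp/target");    // placeholder paths
    Path link = new Path("/tmp/link");
    fs.create(target, true).close();          // target must exist first
    fs.createSymlink(target, link, true);     // createParent = true
    System.out.println(fs.getFileLinkStatus(link).isSymlink());
  }
}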
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat.cases.function; + +import org.apache.hadoop.compat.*; +import org.apache.hadoop.fs.*; +import org.junit.Assert; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +@HdfsCompatCaseGroup(name = "TPCDS") +public class HdfsCompatTpcds extends AbstractHdfsCompatCase { + private static final int fileLen = 8; + private Path path = null; + + @HdfsCompatCasePrepare + public void prepare() throws IOException { + path = makePath("path"); + } + + @HdfsCompatCaseCleanup + public void cleanup() throws IOException { + HdfsCompatUtil.deleteQuietly(fs(), path, true); + } + + @HdfsCompatCase + public void open() throws IOException { + HdfsCompatUtil.createFile(fs(), path, fileLen); + byte[] data = new byte[fileLen]; + try (FSDataInputStream in = fs().open(path)) { + in.readFully(data); + } + } + + @HdfsCompatCase + public void create() throws IOException { + byte[] data = new byte[fileLen]; + new Random().nextBytes(data); + try (FSDataOutputStream out = fs().create(path, true)) { + out.write(data); + } + } + + @HdfsCompatCase + public void mkdirs() throws IOException { + Assert.assertTrue(fs().mkdirs(path)); + } + + @HdfsCompatCase + public void getFileStatus() throws IOException { + HdfsCompatUtil.createFile(fs(), path, fileLen); + FileStatus fileStatus = fs().getFileStatus(path); + Assert.assertEquals(fileLen, fileStatus.getLen()); + } + + @HdfsCompatCase + public void listStatus() throws IOException { + HdfsCompatUtil.createFile(fs(), new Path(path, "file"), fileLen); + FileStatus[] files = fs().listStatus(path); + Assert.assertEquals(1, files.length); + Assert.assertEquals(fileLen, files[0].getLen()); + } + + @HdfsCompatCase + public void listLocatedStatus() throws IOException { + HdfsCompatUtil.createFile(fs(), new Path(path, "file"), fileLen); + RemoteIterator it = fs().listLocatedStatus(path); + List files = new ArrayList<>(); + while (it.hasNext()) { + files.add(it.next()); + } + Assert.assertEquals(1, files.size()); + Assert.assertEquals(fileLen, files.get(0).getLen()); + } + + @HdfsCompatCase + public void rename() throws IOException { + HdfsCompatUtil.createFile(fs(), new Path(path, "file"), fileLen); + fs().rename(path, new Path(path.getParent(), path.getName() + "_dst")); + } + + @HdfsCompatCase + public void delete() throws IOException { + HdfsCompatUtil.createFile(fs(), new Path(path, "file"), fileLen); + fs().delete(path, true); + } + + @HdfsCompatCase + public void getServerDefaults() throws IOException { + Assert.assertNotNull(fs().getServerDefaults(path)); + } + + @HdfsCompatCase + public void getTrashRoot() throws IOException { + Assert.assertNotNull(fs().getTrashRoot(path)); + } + + @HdfsCompatCase + public void makeQualified() throws IOException { + Assert.assertNotNull(fs().makeQualified(path)); + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatXAttr.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatXAttr.java new file mode 100644 index 0000000000000..05fb9f712c472 --- /dev/null +++ 
b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatXAttr.java @@ -0,0 +1,94 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat.cases.function; + + +import org.apache.hadoop.compat.*; +import org.apache.hadoop.fs.Path; +import org.junit.Assert; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +@HdfsCompatCaseGroup(name = "XAttr") +public class HdfsCompatXAttr extends AbstractHdfsCompatCase { + private Path file; + + @HdfsCompatCasePrepare + public void prepare() throws IOException { + this.file = makePath("file"); + HdfsCompatUtil.createFile(fs(), this.file, 0); + } + + @HdfsCompatCaseCleanup + public void cleanup() { + HdfsCompatUtil.deleteQuietly(fs(), this.file, true); + } + + @HdfsCompatCase + public void setXAttr() throws IOException { + final String key = "user.key"; + final byte[] value = "value".getBytes(); + fs().setXAttr(file, key, value); + Map attrs = fs().getXAttrs(file); + Assert.assertArrayEquals(value, attrs.getOrDefault(key, new byte[0])); + } + + @HdfsCompatCase + public void getXAttr() throws IOException { + final String key = "user.key"; + final byte[] value = "value".getBytes(); + fs().setXAttr(file, key, value); + byte[] attr = fs().getXAttr(file, key); + Assert.assertArrayEquals(value, attr); + } + + @HdfsCompatCase + public void getXAttrs() throws IOException { + fs().setXAttr(file, "user.key1", "value1".getBytes()); + fs().setXAttr(file, "user.key2", "value2".getBytes()); + List keys = new ArrayList<>(); + keys.add("user.key1"); + Map attrs = fs().getXAttrs(file, keys); + Assert.assertEquals(1, attrs.size()); + byte[] attr = attrs.getOrDefault("user.key1", new byte[0]); + Assert.assertArrayEquals("value1".getBytes(), attr); + } + + @HdfsCompatCase + public void listXAttrs() throws IOException { + fs().setXAttr(file, "user.key1", "value1".getBytes()); + fs().setXAttr(file, "user.key2", "value2".getBytes()); + List names = fs().listXAttrs(file); + Assert.assertEquals(2, names.size()); + Assert.assertTrue(names.contains("user.key1")); + Assert.assertTrue(names.contains("user.key2")); + } + + @HdfsCompatCase + public void removeXAttr() throws IOException { + fs().setXAttr(file, "user.key1", "value1".getBytes()); + fs().setXAttr(file, "user.key2", "value2".getBytes()); + fs().removeXAttr(file, "user.key1"); + List names = fs().listXAttrs(file); + Assert.assertEquals(1, names.size()); + Assert.assertTrue(names.contains("user.key2")); + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/implement/HdfsCompatFileSystemImpl.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/implement/HdfsCompatFileSystemImpl.java new file mode 100644 index 0000000000000..8245c3e7fd8d0 --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/implement/HdfsCompatFileSystemImpl.java @@ -0,0 +1,735 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + *
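All of the xattr keys in HdfsCompatXAttr above carry the user. prefix; HDFS accepts only namespaced keys (user., trusted., system., security., and raw.). A minimal round-trip sketch with a placeholder path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.nio.charset.StandardCharsets;

public class XAttrSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/xattr-demo");  // placeholder
    fs.create(file, true).close();
    // Un-namespaced keys are rejected by HDFS; "user." is the usual choice.
    fs.setXAttr(file, "user.owner-team",
        "infra".getBytes(StandardCharsets.UTF_8));
    byte[] v = fs.getXAttr(file, "user.owner-team");
    System.out.println(new String(v, StandardCharsets.UTF_8));
  }
}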
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat.cases.implement; + + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.compat.AbstractHdfsCompatCase; +import org.apache.hadoop.compat.HdfsCompatCase; +import org.apache.hadoop.compat.HdfsCompatCaseGroup; +import org.apache.hadoop.compat.HdfsCompatUtil; +import org.apache.hadoop.fs.CommonPathCapabilities; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; + +import java.io.IOException; +import java.net.URI; +import java.util.ArrayList; +import java.util.List; + +@HdfsCompatCaseGroup(name = "FileSystem") +public class HdfsCompatFileSystemImpl extends AbstractHdfsCompatCase { + @HdfsCompatCase + public void initialize() throws IOException { + FileSystem another = FileSystem.newInstance(fs().getUri(), fs().getConf()); + HdfsCompatUtil.checkImplementation(() -> + another.initialize(URI.create("hdfs:///"), new Configuration()) + ); + } + + @HdfsCompatCase + public void getScheme() { + HdfsCompatUtil.checkImplementation(() -> + fs().getScheme() + ); + } + + @HdfsCompatCase + public void getUri() { + HdfsCompatUtil.checkImplementation(() -> + fs().getUri() + ); + } + + @HdfsCompatCase + public void getCanonicalServiceName() { + HdfsCompatUtil.checkImplementation(() -> + fs().getCanonicalServiceName() + ); + } + + @HdfsCompatCase + public void getName() { + HdfsCompatUtil.checkImplementation(() -> + fs().getName() + ); + } + + @HdfsCompatCase + public void makeQualified() { + HdfsCompatUtil.checkImplementation(() -> + fs().makeQualified(new Path("/")) + ); + } + + @HdfsCompatCase + public void getChildFileSystems() { + HdfsCompatUtil.checkImplementation(() -> + fs().getChildFileSystems() + ); + } + + @HdfsCompatCase + public void resolvePath() { + HdfsCompatUtil.checkImplementation(() -> + fs().resolvePath(new Path("/")) + ); + } + + @HdfsCompatCase + public void getHomeDirectory() { + HdfsCompatUtil.checkImplementation(() -> + fs().getHomeDirectory() + ); + } + + @HdfsCompatCase + public void setWorkingDirectory() throws IOException { + FileSystem another = FileSystem.newInstance(fs().getUri(), fs().getConf()); + HdfsCompatUtil.checkImplementation(() -> + another.setWorkingDirectory(makePath("/tmp")) + ); + } + + @HdfsCompatCase + public void getWorkingDirectory() { + HdfsCompatUtil.checkImplementation(() -> + fs().getWorkingDirectory() + ); + } + + @HdfsCompatCase + public void close() throws IOException { + FileSystem another = FileSystem.newInstance(fs().getUri(), fs().getConf()); + HdfsCompatUtil.checkImplementation(another::close); + } + + @HdfsCompatCase + public void getDefaultBlockSize() { + HdfsCompatUtil.checkImplementation(() -> + fs().getDefaultBlockSize(getBasePath()) + ); + } + + @HdfsCompatCase + public void getDefaultReplication() { + HdfsCompatUtil.checkImplementation(() -> + fs().getDefaultReplication(getBasePath()) + ); + } + + @HdfsCompatCase + public void getStorageStatistics() { + HdfsCompatUtil.checkImplementation(() -> + 
fs().getStorageStatistics() + ); + } + + @HdfsCompatCase + public void setVerifyChecksum() { + HdfsCompatUtil.checkImplementation(() -> + fs().setVerifyChecksum(true) + ); + } + + @HdfsCompatCase + public void setWriteChecksum() { + HdfsCompatUtil.checkImplementation(() -> + fs().setWriteChecksum(true) + ); + } + + @HdfsCompatCase + public void getDelegationToken() { + HdfsCompatUtil.checkImplementation(() -> + fs().getDelegationToken("hadoop") + ); + } + + @HdfsCompatCase + public void getAdditionalTokenIssuers() { + HdfsCompatUtil.checkImplementation(() -> + fs().getAdditionalTokenIssuers() + ); + } + + @HdfsCompatCase + public void getServerDefaults() { + HdfsCompatUtil.checkImplementation(() -> + fs().getServerDefaults(new Path("/")) + ); + } + + @HdfsCompatCase + public void msync() { + HdfsCompatUtil.checkImplementation(() -> + fs().msync() + ); + } + + @HdfsCompatCase + public void getStatus() { + HdfsCompatUtil.checkImplementation(() -> + fs().getStatus(new Path("/")) + ); + } + + @HdfsCompatCase + public void getTrashRoot() { + HdfsCompatUtil.checkImplementation(() -> + fs().getTrashRoot(new Path("/user/hadoop/tmp")) + ); + } + + @HdfsCompatCase + public void getTrashRoots() { + HdfsCompatUtil.checkImplementation(() -> + fs().getTrashRoots(true) + ); + } + + @HdfsCompatCase + public void getAllStoragePolicies() { + HdfsCompatUtil.checkImplementation(() -> + fs().getAllStoragePolicies() + ); + } + + @HdfsCompatCase + public void supportsSymlinks() { + HdfsCompatUtil.checkImplementation(() -> + fs().supportsSymlinks() + ); + } + + @HdfsCompatCase + public void hasPathCapability() { + HdfsCompatUtil.checkImplementation(() -> + fs().hasPathCapability(getBasePath(), + CommonPathCapabilities.FS_TRUNCATE) + ); + } + + @HdfsCompatCase + public void mkdirs() { + HdfsCompatUtil.checkImplementation(() -> + fs().mkdirs(makePath("mkdir")) + ); + } + + @HdfsCompatCase + public void getFileStatus() { + HdfsCompatUtil.checkImplementation(() -> + fs().getFileStatus(makePath("file")) + ); + } + + @HdfsCompatCase + public void exists() { + HdfsCompatUtil.checkImplementation(() -> + fs().exists(makePath("file")) + ); + } + + @HdfsCompatCase + public void isDirectory() { + HdfsCompatUtil.checkImplementation(() -> + fs().isDirectory(makePath("file")) + ); + } + + @HdfsCompatCase + public void isFile() { + HdfsCompatUtil.checkImplementation(() -> + fs().isFile(makePath("file")) + ); + } + + @HdfsCompatCase + public void getLength() { + HdfsCompatUtil.checkImplementation(() -> + fs().getLength(makePath("file")) + ); + } + + @HdfsCompatCase + public void getBlockSize() { + HdfsCompatUtil.checkImplementation(() -> + fs().getBlockSize(makePath("file")) + ); + } + + @HdfsCompatCase + public void listStatus() { + HdfsCompatUtil.checkImplementation(() -> + fs().listStatus(makePath("dir")) + ); + } + + @HdfsCompatCase + public void globStatus() { + HdfsCompatUtil.checkImplementation(() -> + fs().globStatus(makePath("dir")) + ); + } + + @HdfsCompatCase + public void listLocatedStatus() { + HdfsCompatUtil.checkImplementation(() -> + fs().listLocatedStatus(makePath("dir")) + ); + } + + @HdfsCompatCase + public void listStatusIterator() { + HdfsCompatUtil.checkImplementation(() -> + fs().listStatusIterator(makePath("dir")) + ); + } + + @HdfsCompatCase + public void listFiles() { + HdfsCompatUtil.checkImplementation(() -> + fs().listFiles(makePath("dir"), false) + ); + } + + @HdfsCompatCase + public void rename() { + HdfsCompatUtil.checkImplementation(() -> + fs().rename(makePath("src"), makePath("dst")) + ); + } + 
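+ // The cases below all follow the same shape: each wraps a single
+ // FileSystem call in HdfsCompatUtil.checkImplementation rather than
+ // asserting on results; functional behavior is covered elsewhere.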
+ @HdfsCompatCase + public void delete() { + HdfsCompatUtil.checkImplementation(() -> + fs().delete(makePath("file"), true) + ); + } + + @HdfsCompatCase + public void deleteOnExit() { + HdfsCompatUtil.checkImplementation(() -> + fs().deleteOnExit(makePath("file")) + ); + } + + @HdfsCompatCase + public void cancelDeleteOnExit() { + HdfsCompatUtil.checkImplementation(() -> + fs().cancelDeleteOnExit(makePath("file")) + ); + } + + @HdfsCompatCase + public void truncate() { + HdfsCompatUtil.checkImplementation(() -> + fs().truncate(makePath("file"), 1) + ); + } + + @HdfsCompatCase + public void setOwner() { + HdfsCompatUtil.checkImplementation(() -> + fs().setOwner(makePath("file"), "test-user", "test-group") + ); + } + + @HdfsCompatCase + public void setTimes() { + HdfsCompatUtil.checkImplementation(() -> + fs().setTimes(makePath("file"), 1696089600L, 1696089600L) + ); + } + + @HdfsCompatCase + public void concat() { + HdfsCompatUtil.checkImplementation(() -> + fs().concat(makePath("file"), + new Path[]{makePath("file1"), makePath("file2")}) + ); + } + + @HdfsCompatCase + public void getFileChecksum() { + HdfsCompatUtil.checkImplementation(() -> + fs().getFileChecksum(makePath("file")) + ); + } + + @HdfsCompatCase + public void getFileBlockLocations() { + HdfsCompatUtil.checkImplementation(() -> + fs().getFileBlockLocations(new FileStatus(), 0, 128) + ); + } + + @HdfsCompatCase + public void listCorruptFileBlocks() { + HdfsCompatUtil.checkImplementation(() -> + fs().listCorruptFileBlocks(makePath("file")) + ); + } + + @HdfsCompatCase + public void getReplication() { + HdfsCompatUtil.checkImplementation(() -> + fs().getReplication(makePath("file")) + ); + } + + @HdfsCompatCase + public void setReplication() { + HdfsCompatUtil.checkImplementation(() -> + fs().setReplication(makePath("file"), (short) 2) + ); + } + + @HdfsCompatCase + public void getPathHandle() { + HdfsCompatUtil.checkImplementation(() -> + fs().getPathHandle(new FileStatus()) + ); + } + + @HdfsCompatCase + public void create() { + HdfsCompatUtil.checkImplementation(() -> + fs().create(makePath("file"), true) + ); + } + + @HdfsCompatCase + public void createNonRecursive() { + HdfsCompatUtil.checkImplementation(() -> + fs().createNonRecursive(makePath("file"), true, 1024, + (short) 1, 1048576, null) + ); + } + + @HdfsCompatCase + public void createNewFile() { + HdfsCompatUtil.checkImplementation(() -> + fs().createNewFile(makePath("file")) + ); + } + + @HdfsCompatCase + public void append() throws IOException { + final Path file = makePath("file"); + try { + HdfsCompatUtil.createFile(fs(), file, 0); + HdfsCompatUtil.checkImplementation(() -> + fs().append(file) + ); + } finally { + HdfsCompatUtil.deleteQuietly(fs(), file, true); + } + } + + @HdfsCompatCase + public void createFile() { + HdfsCompatUtil.checkImplementation(() -> + fs().createFile(makePath("file")) + ); + } + + @HdfsCompatCase + public void appendFile() throws IOException { + final Path file = makePath("file"); + try { + HdfsCompatUtil.createFile(fs(), file, 0); + HdfsCompatUtil.checkImplementation(() -> + fs().appendFile(file) + ); + } finally { + HdfsCompatUtil.deleteQuietly(fs(), file, true); + } + } + + @HdfsCompatCase + public void createMultipartUploader() { + HdfsCompatUtil.checkImplementation(() -> + fs().createMultipartUploader(makePath("file")) + ); + } + + @HdfsCompatCase + public void open() { + HdfsCompatUtil.checkImplementation(() -> + fs().open(makePath("file")) + ); + } + + @HdfsCompatCase + public void openFile() { + 
HdfsCompatUtil.checkImplementation(() -> + fs().openFile(makePath("file")) + ); + } + + @HdfsCompatCase + public void getContentSummary() { + HdfsCompatUtil.checkImplementation(() -> + fs().getContentSummary(makePath("dir")) + ); + } + + @HdfsCompatCase + public void getUsed() { + HdfsCompatUtil.checkImplementation(() -> + fs().getUsed(makePath("dir")) + ); + } + + @HdfsCompatCase + public void getQuotaUsage() { + HdfsCompatUtil.checkImplementation(() -> + fs().getQuotaUsage(makePath("dir")) + ); + } + + @HdfsCompatCase + public void setQuota() { + HdfsCompatUtil.checkImplementation(() -> + fs().setQuota(makePath("dir"), 1024L, 1048576L) + ); + } + + @HdfsCompatCase + public void setQuotaByStorageType() { + HdfsCompatUtil.checkImplementation(() -> + fs().setQuotaByStorageType(makePath("dir"), StorageType.SSD, 1048576L) + ); + } + + @HdfsCompatCase + public void access() { + HdfsCompatUtil.checkImplementation(() -> + fs().access(makePath("file"), FsAction.EXECUTE) + ); + } + + @HdfsCompatCase + public void setPermission() { + HdfsCompatUtil.checkImplementation(() -> + fs().setPermission(makePath("file"), FsPermission.getDefault()) + ); + } + + @HdfsCompatCase + public void createSymlink() { + FileSystem.enableSymlinks(); + HdfsCompatUtil.checkImplementation(() -> + fs().createSymlink(makePath("file"), makePath("link"), true) + ); + } + + @HdfsCompatCase + public void getFileLinkStatus() { + HdfsCompatUtil.checkImplementation(() -> + fs().getFileLinkStatus(makePath("file")) + ); + } + + @HdfsCompatCase + public void getLinkTarget() { + HdfsCompatUtil.checkImplementation(() -> + fs().getLinkTarget(makePath("link")) + ); + } + + @HdfsCompatCase + public void modifyAclEntries() { + List entries = AclEntry.parseAclSpec("user:foo:---", true); + HdfsCompatUtil.checkImplementation(() -> + fs().modifyAclEntries(makePath("modifyAclEntries"), entries) + ); + } + + @HdfsCompatCase + public void removeAclEntries() { + List entries = AclEntry.parseAclSpec("user:foo:---", true); + HdfsCompatUtil.checkImplementation(() -> + fs().removeAclEntries(makePath("removeAclEntries"), entries) + ); + } + + @HdfsCompatCase + public void removeDefaultAcl() { + HdfsCompatUtil.checkImplementation(() -> + fs().removeDefaultAcl(makePath("removeDefaultAcl")) + ); + } + + @HdfsCompatCase + public void removeAcl() { + HdfsCompatUtil.checkImplementation(() -> + fs().removeAcl(makePath("removeAcl")) + ); + } + + @HdfsCompatCase + public void setAcl() { + List entries = AclEntry.parseAclSpec("user:foo:---", true); + HdfsCompatUtil.checkImplementation(() -> + fs().setAcl(makePath("setAcl"), entries) + ); + } + + @HdfsCompatCase + public void getAclStatus() { + HdfsCompatUtil.checkImplementation(() -> + fs().getAclStatus(makePath("getAclStatus")) + ); + } + + @HdfsCompatCase + public void setXAttr() { + HdfsCompatUtil.checkImplementation(() -> + fs().setXAttr(makePath("file"), "test-xattr", "test-value".getBytes()) + ); + } + + @HdfsCompatCase + public void getXAttr() { + HdfsCompatUtil.checkImplementation(() -> + fs().getXAttr(makePath("file"), "test-xattr") + ); + } + + @HdfsCompatCase + public void getXAttrs() { + List names = new ArrayList<>(); + names.add("test-xattr"); + HdfsCompatUtil.checkImplementation(() -> + fs().getXAttrs(makePath("file"), names) + ); + } + + @HdfsCompatCase + public void listXAttrs() { + HdfsCompatUtil.checkImplementation(() -> + fs().listXAttrs(makePath("file")) + ); + } + + @HdfsCompatCase + public void removeXAttr() { + HdfsCompatUtil.checkImplementation(() -> + fs().removeXAttr(makePath("file"), 
"test-xattr") + ); + } + + @HdfsCompatCase + public void setStoragePolicy() { + HdfsCompatUtil.checkImplementation(() -> + fs().setStoragePolicy(makePath("dir"), "COLD") + ); + } + + @HdfsCompatCase + public void unsetStoragePolicy() { + HdfsCompatUtil.checkImplementation(() -> + fs().unsetStoragePolicy(makePath("dir")) + ); + } + + @HdfsCompatCase + public void satisfyStoragePolicy() { + HdfsCompatUtil.checkImplementation(() -> + fs().satisfyStoragePolicy(makePath("dir")) + ); + } + + @HdfsCompatCase + public void getStoragePolicy() { + HdfsCompatUtil.checkImplementation(() -> + fs().getStoragePolicy(makePath("dir")) + ); + } + + @HdfsCompatCase + public void copyFromLocalFile() { + HdfsCompatUtil.checkImplementation(() -> + fs().copyFromLocalFile(makePath("src"), makePath("dst")) + ); + } + + @HdfsCompatCase + public void moveFromLocalFile() { + HdfsCompatUtil.checkImplementation(() -> + fs().moveFromLocalFile(makePath("src"), makePath("dst")) + ); + } + + @HdfsCompatCase + public void copyToLocalFile() { + HdfsCompatUtil.checkImplementation(() -> + fs().copyToLocalFile(makePath("src"), makePath("dst")) + ); + } + + @HdfsCompatCase + public void moveToLocalFile() { + HdfsCompatUtil.checkImplementation(() -> + fs().moveToLocalFile(makePath("src"), makePath("dst")) + ); + } + + @HdfsCompatCase + public void startLocalOutput() { + HdfsCompatUtil.checkImplementation(() -> + fs().startLocalOutput(makePath("out"), makePath("tmp")) + ); + } + + @HdfsCompatCase + public void completeLocalOutput() { + HdfsCompatUtil.checkImplementation(() -> + fs().completeLocalOutput(makePath("out"), makePath("tmp")) + ); + } + + @HdfsCompatCase + public void createSnapshot() { + HdfsCompatUtil.checkImplementation(() -> + fs().createSnapshot(makePath("file"), "s_name") + ); + } + + @HdfsCompatCase + public void renameSnapshot() { + HdfsCompatUtil.checkImplementation(() -> + fs().renameSnapshot(makePath("file"), "s_name", "n_name") + ); + } + + @HdfsCompatCase + public void deleteSnapshot() { + HdfsCompatUtil.checkImplementation(() -> + fs().deleteSnapshot(makePath("file"), "s_name") + ); + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForAll.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForAll.java new file mode 100644 index 0000000000000..a5d1923276da9 --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForAll.java @@ -0,0 +1,68 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat.suites; + + +import org.apache.hadoop.compat.AbstractHdfsCompatCase; +import org.apache.hadoop.compat.HdfsCompatSuite; +import org.apache.hadoop.compat.cases.implement.*; +import org.apache.hadoop.compat.cases.function.*; + +public class HdfsCompatSuiteForAll implements HdfsCompatSuite { + private static final Class[] API_CASES = new Class[]{ + HdfsCompatFileSystemImpl.class, + HdfsCompatAcl.class, + HdfsCompatCreate.class, + HdfsCompatDirectory.class, + HdfsCompatFile.class, + HdfsCompatLocal.class, + HdfsCompatServer.class, + HdfsCompatSnapshot.class, + HdfsCompatStoragePolicy.class, + HdfsCompatSymlink.class, + HdfsCompatXAttr.class, + }; + + private static final String[] SHELL_CASES = new String[]{ + "modification.t", + "fileinfo.t", + "read.t", + "remove.t", + "attr.t", + "copy.t", + "move.t", + "concat.t", + "snapshot.t", + "storagePolicy.t", + }; + + @Override + public String getSuiteName() { + return "ALL"; + } + + @Override + public Class[] getApiCases() { + return API_CASES; + } + + @Override + public String[] getShellCases() { + return SHELL_CASES; + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForShell.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForShell.java new file mode 100644 index 0000000000000..dcf57555dcc77 --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForShell.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
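Defining a custom suite only requires implementing HdfsCompatSuite, as the class above shows. A minimal sketch; the class name and case selection are arbitrary, and the generic bound on getApiCases is inferred from the imports above rather than confirmed:

package org.apache.hadoop.compat.suites;

import org.apache.hadoop.compat.AbstractHdfsCompatCase;
import org.apache.hadoop.compat.HdfsCompatSuite;
import org.apache.hadoop.compat.cases.function.HdfsCompatXAttr;

public class HdfsCompatSuiteForXAttr implements HdfsCompatSuite {
  @Override
  public String getSuiteName() {
    return "XAttr";  // arbitrary name for this sketch
  }

  @Override
  public Class<? extends AbstractHdfsCompatCase>[] getApiCases() {
    @SuppressWarnings("unchecked")
    Class<? extends AbstractHdfsCompatCase>[] cases =
        new Class[]{HdfsCompatXAttr.class};
    return cases;
  }

  @Override
  public String[] getShellCases() {
    return new String[]{"attr.t"};  // reuses an existing shell case
  }
}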
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat.suites; + + +import org.apache.hadoop.compat.AbstractHdfsCompatCase; +import org.apache.hadoop.compat.HdfsCompatSuite; + +public class HdfsCompatSuiteForShell implements HdfsCompatSuite { + private static final String[] SHELL_CASES = new String[]{ + "modification.t", + "fileinfo.t", + "read.t", + "remove.t", + "attr.t", + "copy.t", + "move.t", + "concat.t", + "snapshot.t", + "storagePolicy.t", + }; + + @Override + public String getSuiteName() { + return "Shell"; + } + + @Override + public Class[] getApiCases() { + return new Class[0]; + } + + @Override + public String[] getShellCases() { + return SHELL_CASES; + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForTpcds.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForTpcds.java new file mode 100644 index 0000000000000..c68e361804498 --- /dev/null +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForTpcds.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat.suites; + + +import org.apache.hadoop.compat.AbstractHdfsCompatCase; +import org.apache.hadoop.compat.HdfsCompatSuite; +import org.apache.hadoop.compat.cases.function.*; + +public class HdfsCompatSuiteForTpcds implements HdfsCompatSuite { + private static final Class[] API_CASES = new Class[]{ + HdfsCompatTpcds.class + }; + + @Override + public String getSuiteName() { + return "TPCDS"; + } + + @Override + public Class[] getApiCases() { + return API_CASES; + } + + @Override + public String[] getShellCases() { + return new String[0]; + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/resources/hadoop-compat-bench-log4j.properties b/hadoop-compat-bench/src/main/resources/hadoop-compat-bench-log4j.properties new file mode 100644 index 0000000000000..58a6b1325818a --- /dev/null +++ b/hadoop-compat-bench/src/main/resources/hadoop-compat-bench-log4j.properties @@ -0,0 +1,24 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# log4j configuration used during build and unit tests + +log4j.rootLogger=info,stderr +log4j.threshold=ALL +log4j.appender.stderr=org.apache.log4j.ConsoleAppender +log4j.appender.stderr.Target=System.err +log4j.appender.stderr.layout=org.apache.log4j.PatternLayout +log4j.appender.stderr.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n diff --git a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatDefaultSuites.java b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatDefaultSuites.java new file mode 100644 index 0000000000000..2b7adc6ac5287 --- /dev/null +++ b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatDefaultSuites.java @@ -0,0 +1,60 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat; + +import org.apache.hadoop.compat.hdfs.HdfsCompatMiniCluster; +import org.apache.hadoop.compat.hdfs.HdfsCompatTestCommand; +import org.apache.hadoop.conf.Configuration; +import org.junit.Assert; +import org.junit.Test; + +public class TestHdfsCompatDefaultSuites { + @Test + public void testSuiteAll() throws Exception { + HdfsCompatMiniCluster cluster = new HdfsCompatMiniCluster(); + try { + cluster.start(); + final String uri = cluster.getUri() + "/tmp"; + Configuration conf = cluster.getConf(); + HdfsCompatCommand cmd = new HdfsCompatTestCommand(uri, "ALL", conf); + cmd.initialize(); + HdfsCompatReport report = cmd.apply(); + Assert.assertEquals(0, report.getFailedCase().size()); + new HdfsCompatibility(conf).printReport(report, System.out); + } finally { + cluster.shutdown(); + } + } + + @Test + public void testSuiteTpcds() throws Exception { + HdfsCompatMiniCluster cluster = new HdfsCompatMiniCluster(); + try { + cluster.start(); + final String uri = cluster.getUri() + "/tmp"; + Configuration conf = cluster.getConf(); + HdfsCompatCommand cmd = new HdfsCompatTestCommand(uri, "TPCDS", conf); + cmd.initialize(); + HdfsCompatReport report = cmd.apply(); + Assert.assertEquals(0, report.getFailedCase().size()); + new HdfsCompatibility(conf).printReport(report, System.out); + } finally { + cluster.shutdown(); + } + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatFsCommand.java b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatFsCommand.java new file mode 100644 index 0000000000000..ed17fbced273d --- /dev/null +++ b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatFsCommand.java @@ -0,0 +1,179 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
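Outside a mini-cluster test, the same driver sequence applies against a real cluster. A sketch that mirrors the test above; the target URI is a placeholder, and the HdfsCompatCommand constructor visibility is assumed from the TestCommand subclass later in this patch:

import org.apache.hadoop.compat.HdfsCompatCommand;
import org.apache.hadoop.compat.HdfsCompatReport;
import org.apache.hadoop.compat.HdfsCompatibility;
import org.apache.hadoop.conf.Configuration;

public class RunCompatBench {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical target; any FileSystem URI the cases can reach works.
    HdfsCompatCommand cmd =
        new HdfsCompatCommand("hdfs://ns1/tmp", "ALL", conf);
    cmd.initialize();
    HdfsCompatReport report = cmd.apply();
    new HdfsCompatibility(conf).printReport(report, System.out);
  }
}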
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat; + + +import org.apache.hadoop.compat.hdfs.HdfsCompatMiniCluster; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.compat.cases.HdfsCompatAclTestCases; +import org.apache.hadoop.compat.cases.HdfsCompatMkdirTestCases; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.junit.Assert; +import org.junit.Test; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.util.HashMap; +import java.util.Map; + +public class TestHdfsCompatFsCommand { + @Test + public void testDfsCompatibility() throws Exception { + final String suite = "ALL"; + HdfsCompatMiniCluster cluster = null; + try { + cluster = new HdfsCompatMiniCluster(); + cluster.start(); + final String uri = cluster.getUri() + "/tmp"; + final Configuration conf = cluster.getConf(); + + HdfsCompatCommand cmd = new TestCommand(uri, suite, conf); + cmd.initialize(); + HdfsCompatReport report = cmd.apply(); + Assert.assertEquals(7, report.getPassedCase().size()); + Assert.assertEquals(0, report.getFailedCase().size()); + show(conf, report); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } + + @Test + public void testLocalFsCompatibility() throws Exception { + final String uri = "file:///tmp/"; + final String suite = "ALL"; + final Configuration conf = new Configuration(); + HdfsCompatCommand cmd = new TestCommand(uri, suite, conf); + cmd.initialize(); + HdfsCompatReport report = cmd.apply(); + Assert.assertEquals(1, report.getPassedCase().size()); + Assert.assertEquals(6, report.getFailedCase().size()); + show(conf, report); + cleanup(cmd, conf); + } + + @Test + public void testFsCompatibilityWithSuite() throws Exception { + final String uri = "file:///tmp/"; + final String suite = "acl"; + final Configuration conf = new Configuration(); + HdfsCompatCommand cmd = new TestCommand(uri, suite, conf); + cmd.initialize(); + HdfsCompatReport report = cmd.apply(); + Assert.assertEquals(0, report.getPassedCase().size()); + Assert.assertEquals(6, report.getFailedCase().size()); + show(conf, report); + cleanup(cmd, conf); + } + + private void show(Configuration conf, HdfsCompatReport report) throws IOException { + new HdfsCompatibility(conf).printReport(report, System.out); + } + + private void cleanup(HdfsCompatCommand cmd, Configuration conf) throws Exception { + Path basePath = ((TestCommand) cmd).getBasePath(); + FileSystem fs = basePath.getFileSystem(conf); + fs.delete(basePath, true); + } + + private static class TestCommand extends HdfsCompatCommand { + private TestCommand(String uri, String suiteName, Configuration conf) { + super(uri, suiteName, conf); + } + + @Override + protected Map getDefaultSuites() { + Map defaultSuites = new HashMap<>(); + defaultSuites.put("all", new AllTestSuite()); + defaultSuites.put("mkdir", new MkdirTestSuite()); + defaultSuites.put("acl", new AclTestSuite()); + return defaultSuites; + } + + private Path getBasePath() throws ReflectiveOperationException { + Field apiField = HdfsCompatCommand.class.getDeclaredField("api"); + apiField.setAccessible(true); + HdfsCompatApiScope api = (HdfsCompatApiScope) apiField.get(this); + Field envField = 
api.getClass().getDeclaredField("env"); + envField.setAccessible(true); + HdfsCompatEnvironment env = (HdfsCompatEnvironment) envField.get(api); + return env.getBase(); + } + } + + private static class AllTestSuite implements HdfsCompatSuite { + @Override + public String getSuiteName() { + return "All (Test)"; + } + + @Override + public Class[] getApiCases() { + return new Class[]{ + HdfsCompatMkdirTestCases.class, + HdfsCompatAclTestCases.class, + }; + } + + @Override + public String[] getShellCases() { + return new String[0]; + } + } + + private static class MkdirTestSuite implements HdfsCompatSuite { + @Override + public String getSuiteName() { + return "Mkdir"; + } + + @Override + public Class[] getApiCases() { + return new Class[]{ + HdfsCompatMkdirTestCases.class, + }; + } + + @Override + public String[] getShellCases() { + return new String[0]; + } + } + + private static class AclTestSuite implements HdfsCompatSuite { + @Override + public String getSuiteName() { + return "ACL"; + } + + @Override + public Class[] getApiCases() { + return new Class[]{ + HdfsCompatAclTestCases.class, + }; + } + + @Override + public String[] getShellCases() { + return new String[0]; + } + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatInterfaceCoverage.java b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatInterfaceCoverage.java new file mode 100644 index 0000000000000..77643560aaba3 --- /dev/null +++ b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatInterfaceCoverage.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat; + + +import org.apache.hadoop.compat.cases.implement.HdfsCompatFileSystemImpl; +import org.apache.hadoop.fs.FileSystem; +import org.junit.Assert; +import org.junit.Ignore; +import org.junit.Test; + +import java.lang.reflect.Method; +import java.util.HashSet; +import java.util.Set; + +public class TestHdfsCompatInterfaceCoverage { + @Test + @Ignore + public void testFsCompatibility() { + Set publicMethods = getPublicInterfaces(FileSystem.class); + Set targets = getTargets(HdfsCompatFileSystemImpl.class); + for (String publicMethod : publicMethods) { + Assert.assertTrue("Method not tested: " + publicMethod, + targets.contains(publicMethod)); + } + } + + private Set getPublicInterfaces(Class cls) { + return HdfsCompatApiScope.getPublicInterfaces(cls); + } + + private Set getTargets(Class cls) { + Method[] methods = cls.getDeclaredMethods(); + Set targets = new HashSet<>(); + for (Method method : methods) { + if (method.isAnnotationPresent(HdfsCompatCase.class)) { + targets.add(method.getName()); + } + } + return targets; + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatShellCommand.java b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatShellCommand.java new file mode 100644 index 0000000000000..8d4c1678647c5 --- /dev/null +++ b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatShellCommand.java @@ -0,0 +1,127 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
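
[Editor's note on the interface-coverage test above: `HdfsCompatApiScope.getPublicInterfaces` is not shown in this hunk. A minimal sketch of what such a helper could look like, assuming it only needs the names of public methods declared by the given class for the containment check in the test:

    import java.lang.reflect.Method;
    import java.lang.reflect.Modifier;
    import java.util.HashSet;
    import java.util.Set;

    // Hypothetical sketch only; the real method lives in HdfsCompatApiScope.
    static Set<String> getPublicInterfaces(Class<?> cls) {
      Set<String> names = new HashSet<>();
      for (Method m : cls.getDeclaredMethods()) {
        if (Modifier.isPublic(m.getModifiers())) {
          names.add(m.getName());  // method name is the unit of coverage here
        }
      }
      return names;
    }
]
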
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat; + + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.compat.hdfs.HdfsCompatMiniCluster; +import org.apache.hadoop.compat.hdfs.HdfsCompatTestCommand; +import org.apache.hadoop.compat.hdfs.HdfsCompatTestShellScope; +import org.apache.hadoop.conf.Configuration; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; + +public class TestHdfsCompatShellCommand { + private HdfsCompatMiniCluster cluster; + + @Before + public void runCluster() throws IOException { + this.cluster = new HdfsCompatMiniCluster(); + this.cluster.start(); + } + + @After + public void shutdownCluster() { + this.cluster.shutdown(); + this.cluster = null; + } + + @Test + public void testDfsCompatibility() throws Exception { + final String uri = cluster.getUri() + "/tmp"; + final Configuration conf = cluster.getConf(); + HdfsCompatCommand cmd = new TestCommand(uri, conf); + cmd.initialize(); + HdfsCompatReport report = cmd.apply(); + Assert.assertEquals(3, report.getPassedCase().size()); + Assert.assertEquals(0, report.getFailedCase().size()); + show(conf, report); + } + + @Test + public void testSkipCompatibility() throws Exception { + final String uri = cluster.getUri() + "/tmp"; + final Configuration conf = cluster.getConf(); + HdfsCompatCommand cmd = new TestSkipCommand(uri, conf); + cmd.initialize(); + HdfsCompatReport report = cmd.apply(); + Assert.assertEquals(2, report.getPassedCase().size()); + Assert.assertEquals(0, report.getFailedCase().size()); + show(conf, report); + } + + private void show(Configuration conf, HdfsCompatReport report) throws IOException { + new HdfsCompatibility(conf).printReport(report, System.out); + } + + private static class TestCommand extends HdfsCompatTestCommand { + private TestCommand(String uri, Configuration conf) { + super(uri, "shell", conf); + } + + @Override + protected HdfsCompatShellScope getShellScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) { + return new TestShellScope(env, suite); + } + } + + private static class TestSkipCommand extends HdfsCompatTestCommand { + private TestSkipCommand(String uri, Configuration conf) { + super(uri, "shell", conf); + } + + @Override + protected HdfsCompatShellScope getShellScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) { + return new TestShellScopeForSkip(env, suite); + } + } + + private static class TestShellScope extends HdfsCompatTestShellScope { + public TestShellScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) { + super(env, suite); + } + + @Override + protected void replace(File scriptDir) throws IOException { + File casesDir = new File(scriptDir, "cases"); + FileUtils.deleteDirectory(casesDir); + Files.createDirectories(casesDir.toPath()); + copyResource("/test-case-simple.t", new File(casesDir, "test-case-simple.t")); + } + } + + private static class TestShellScopeForSkip extends HdfsCompatTestShellScope { + public TestShellScopeForSkip(HdfsCompatEnvironment env, HdfsCompatSuite suite) { + super(env, suite); + } + + @Override + protected void replace(File scriptDir) throws IOException { + File casesDir = 
new File(scriptDir, "cases"); + FileUtils.deleteDirectory(casesDir); + Files.createDirectories(casesDir.toPath()); + copyResource("/test-case-skip.t", new File(casesDir, "test-case-skip.t")); + } + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/cases/HdfsCompatAclTestCases.java b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/cases/HdfsCompatAclTestCases.java new file mode 100644 index 0000000000000..15baf04eac0ae --- /dev/null +++ b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/cases/HdfsCompatAclTestCases.java @@ -0,0 +1,68 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat.cases; + +import org.apache.hadoop.compat.AbstractHdfsCompatCase; +import org.apache.hadoop.compat.HdfsCompatCase; +import org.apache.hadoop.compat.HdfsCompatUtil; + +import java.util.ArrayList; + +public class HdfsCompatAclTestCases extends AbstractHdfsCompatCase { + @HdfsCompatCase + public void modifyAclEntries() { + HdfsCompatUtil.checkImplementation(() -> + fs().modifyAclEntries(makePath("modifyAclEntries"), new ArrayList<>()) + ); + } + + @HdfsCompatCase + public void removeAclEntries() { + HdfsCompatUtil.checkImplementation(() -> + fs().removeAclEntries(makePath("removeAclEntries"), new ArrayList<>()) + ); + } + + @HdfsCompatCase + public void removeDefaultAcl() { + HdfsCompatUtil.checkImplementation(() -> + fs().removeDefaultAcl(makePath("removeDefaultAcl")) + ); + } + + @HdfsCompatCase + public void removeAcl() { + HdfsCompatUtil.checkImplementation(() -> + fs().removeAcl(makePath("removeAcl")) + ); + } + + @HdfsCompatCase + public void setAcl() { + HdfsCompatUtil.checkImplementation(() -> + fs().setAcl(makePath("setAcl"), new ArrayList<>()) + ); + } + + @HdfsCompatCase + public void getAclStatus() { + HdfsCompatUtil.checkImplementation(() -> + fs().getAclStatus(makePath("getAclStatus")) + ); + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/cases/HdfsCompatMkdirTestCases.java b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/cases/HdfsCompatMkdirTestCases.java new file mode 100644 index 0000000000000..26209dcacc8be --- /dev/null +++ b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/cases/HdfsCompatMkdirTestCases.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
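
[Editor's note on HdfsCompatAclTestCases above: every case funnels its FileSystem call through `HdfsCompatUtil.checkImplementation`. Its body is only partially visible later in this patch; a hedged sketch of the apparent contract, assuming a single throwing method here called `apply()`, is:

    // Hypothetical sketch; the real implementation in HdfsCompatUtil.java
    // may classify more exception types than shown here.
    public static void checkImplementation(ImplementationFunction func) {
      try {
        func.apply();       // invoke the FileSystem API under test
      } catch (UnsupportedOperationException e) {
        throw e;            // API declared but not implemented: case fails
      } catch (Exception e) {
        // other exceptions still prove the API is implemented
      }
    }
]
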
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat.cases; + +import org.apache.hadoop.compat.AbstractHdfsCompatCase; +import org.apache.hadoop.compat.HdfsCompatCase; +import org.apache.hadoop.compat.HdfsCompatUtil; + +public class HdfsCompatMkdirTestCases extends AbstractHdfsCompatCase { + @HdfsCompatCase + public void mkdirs() { + HdfsCompatUtil.checkImplementation(() -> + fs().mkdirs(makePath("mkdir")) + ); + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatMiniCluster.java b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatMiniCluster.java new file mode 100644 index 0000000000000..ee6fe440edc9e --- /dev/null +++ b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatMiniCluster.java @@ -0,0 +1,114 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
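
[Editor's note: following the pattern of HdfsCompatMkdirTestCases above, covering another FileSystem API only takes a new `@HdfsCompatCase` method. For illustration only (not part of this patch), a hypothetical case class for delete:

    package org.apache.hadoop.compat.cases;

    import org.apache.hadoop.compat.AbstractHdfsCompatCase;
    import org.apache.hadoop.compat.HdfsCompatCase;
    import org.apache.hadoop.compat.HdfsCompatUtil;

    public class HdfsCompatDeleteTestCases extends AbstractHdfsCompatCase {
      @HdfsCompatCase
      public void delete() {
        // Same shape as mkdirs(): probe the API via checkImplementation.
        HdfsCompatUtil.checkImplementation(() ->
            fs().delete(makePath("delete"), true)
        );
      }
    }
]
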
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.compat.hdfs; + + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.security.UserGroupInformation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.net.URI; + + +public class HdfsCompatMiniCluster { + private static final Logger LOG = + LoggerFactory.getLogger(HdfsCompatMiniCluster.class); + + private MiniDFSCluster cluster = null; + + public HdfsCompatMiniCluster() { + } + + public synchronized void start() throws IOException { + FileSystem.enableSymlinks(); + Configuration conf = new Configuration(); + conf.set(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, "true"); + conf.set(DFSConfigKeys.HADOOP_SECURITY_KEY_PROVIDER_PATH, + "kms://http@localhost:9600/kms/foo"); + conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY, "external"); + conf.set(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, "true"); + conf.set("fs.hdfs.compatibility.privileged.user", + UserGroupInformation.getCurrentUser().getShortUserName()); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); + cluster.waitClusterUp(); + } + + public synchronized void shutdown() { + if (cluster != null) { + cluster.shutdown(true); + cluster = null; + } + } + + public synchronized Configuration getConf() throws IOException { + if (cluster == null) { + throw new IOException("Cluster not running"); + } + return cluster.getFileSystem().getConf(); + } + + public synchronized URI getUri() throws IOException { + if (cluster == null) { + throw new IOException("Cluster not running"); + } + return cluster.getFileSystem().getUri(); + } + + public static void main(String[] args) + throws IOException, InterruptedException { + long duration = 5L * 60L * 1000L; + if ((args != null) && (args.length > 0)) { + duration = Long.parseLong(args[0]); + } + + HdfsCompatMiniCluster cluster = new HdfsCompatMiniCluster(); + try { + cluster.start(); + Configuration conf = cluster.getConf(); + + final String confDir = System.getenv("HADOOP_CONF_DIR"); + final File confFile = new File(confDir, "core-site.xml"); + try (OutputStream out = new FileOutputStream(confFile)) { + conf.writeXml(out); + } + + final long endTime = System.currentTimeMillis() + duration; + long sleepTime = getSleepTime(endTime); + while (sleepTime > 0) { + LOG.warn("Service running ..."); + Thread.sleep(sleepTime); + sleepTime = getSleepTime(endTime); + } + } finally { + cluster.shutdown(); + } + } + + private static long getSleepTime(long endTime) { + long maxTime = endTime - System.currentTimeMillis(); + return (maxTime < 5000) ? 
maxTime : 5000; + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestCommand.java b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestCommand.java new file mode 100644 index 0000000000000..8d17890490637 --- /dev/null +++ b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestCommand.java @@ -0,0 +1,35 @@ +package org.apache.hadoop.compat.hdfs; + + +import org.apache.hadoop.compat.*; +import org.apache.hadoop.conf.Configuration; + +import java.io.IOException; +import java.lang.reflect.Field; + +public class HdfsCompatTestCommand extends HdfsCompatCommand { + public HdfsCompatTestCommand(String uri, String suiteName, Configuration conf) { + super(uri, suiteName, conf); + } + + @Override + public void initialize() throws IOException, ReflectiveOperationException { + super.initialize(); + Field shellField = HdfsCompatCommand.class.getDeclaredField("shell"); + shellField.setAccessible(true); + HdfsCompatShellScope shell = (HdfsCompatShellScope) shellField.get(this); + if (shell != null) { + Field envField = shell.getClass().getDeclaredField("env"); + envField.setAccessible(true); + HdfsCompatEnvironment env = (HdfsCompatEnvironment) envField.get(shell); + Field suiteField = HdfsCompatCommand.class.getDeclaredField("suite"); + suiteField.setAccessible(true); + HdfsCompatSuite suite = (HdfsCompatSuite) suiteField.get(this); + shellField.set(this, getShellScope(env, suite)); + } + } + + protected HdfsCompatShellScope getShellScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) { + return new HdfsCompatTestShellScope(env, suite); + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestShellScope.java b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestShellScope.java new file mode 100644 index 0000000000000..17d3c546945a2 --- /dev/null +++ b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestShellScope.java @@ -0,0 +1,96 @@ +package org.apache.hadoop.compat.hdfs; + +import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.compat.HdfsCompatEnvironment; +import org.apache.hadoop.compat.HdfsCompatShellScope; +import org.apache.hadoop.compat.HdfsCompatSuite; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.file.Files; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class HdfsCompatTestShellScope extends HdfsCompatShellScope { + private final HdfsCompatEnvironment env; + + public HdfsCompatTestShellScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) { + super(env, suite); + this.env = env; + } + + @Override + protected String[] getEnv(File localDir, File scriptDir, File confDir) + throws IOException { + replace(scriptDir); + File binDir = new File(scriptDir, "bin"); + copyToBin(binDir); + confDir = new File(scriptDir, "hadoop-conf-ut"); + writeConf(confDir); + File logConfFile = new File(confDir, "log4j.properties"); + copyResource("/hadoop-compat-bench-log4j.properties", logConfFile); + + String javaHome = System.getProperty("java.home"); + String javaBin = javaHome + File.separator + "bin" + + File.separator + "java"; + String classpath = confDir.getAbsolutePath() + ":" + + System.getProperty("java.class.path"); + String pathenv = System.getenv("PATH"); + if ((pathenv == null) || 
pathenv.isEmpty()) { + pathenv = binDir.getAbsolutePath(); + } else { + pathenv = binDir.getAbsolutePath() + ":" + pathenv; + } + + List confEnv = new ArrayList<>(); + Collections.addAll(confEnv, super.getEnv(localDir, scriptDir, confDir)); + confEnv.add("HADOOP_COMPAT_JAVA_BIN=" + javaBin); + confEnv.add("HADOOP_COMPAT_JAVA_CLASSPATH=" + classpath); + confEnv.add("HADOOP_CONF_DIR=" + confDir.getAbsolutePath()); + confEnv.add("PATH=" + pathenv); + return confEnv.toArray(new String[0]); + } + + @VisibleForTesting + protected void replace(File scriptDir) throws IOException { + } + + private void copyToBin(File binDir) throws IOException { + Files.createDirectories(binDir.toPath()); + File hadoop = new File(binDir, "hadoop"); + File hdfs = new File(binDir, "hdfs"); + copyResource("/hadoop-compat-bench-test-shell-hadoop.sh", hadoop); + copyResource("/hadoop-compat-bench-test-shell-hdfs.sh", hdfs); + if (!hadoop.setReadable(true, false) || + !hadoop.setWritable(true, false) || + !hadoop.setExecutable(true, false)) { + throw new IOException("No permission to hadoop shell."); + } + if (!hdfs.setReadable(true, false) || + !hdfs.setWritable(true, false) || + !hdfs.setExecutable(true, false)) { + throw new IOException("No permission to hdfs shell."); + } + } + + private void writeConf(File confDir) throws IOException { + Files.createDirectories(confDir.toPath()); + if (!confDir.setReadable(true, false) || + !confDir.setWritable(true, false) || + !confDir.setExecutable(true, false)) { + throw new IOException("No permission to conf dir."); + } + File confFile = new File(confDir, "core-site.xml"); + try (OutputStream out = new FileOutputStream(confFile)) { + this.env.getFileSystem().getConf().writeXml(out); + } + if (!confFile.setReadable(true, false) || + !confFile.setWritable(true, false) || + !confFile.setExecutable(true, false)) { + throw new IOException("No permission to conf file."); + } + } +} \ No newline at end of file diff --git a/hadoop-compat-bench/src/test/resources/hadoop-compat-bench-test-shell-hadoop.sh b/hadoop-compat-bench/src/test/resources/hadoop-compat-bench-test-shell-hadoop.sh new file mode 100644 index 0000000000000..95653d7aa92e7 --- /dev/null +++ b/hadoop-compat-bench/src/test/resources/hadoop-compat-bench-test-shell-hadoop.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
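
[Editor's note: the two wrapper scripts in this and the next file stand in for the real hadoop/hdfs launchers during unit tests and re-enter the JVM via HADOOP_COMPAT_JAVA_BIN and HADOOP_COMPAT_JAVA_CLASSPATH. The in-process Java equivalent of what they delegate to is roughly the following sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.util.ToolRunner;

    public class FsShellExample {
      public static void main(String[] args) throws Exception {
        // Equivalent of `hadoop fs -mkdir -p /tmp/dir`, run in-process.
        int exit = ToolRunner.run(new FsShell(new Configuration()),
            new String[]{"-mkdir", "-p", "/tmp/dir"});
        System.exit(exit);
      }
    }
]
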
+ +cmd="${1}" +shift + +if [ X"${cmd}" != X"fs" ]; then + exit 1 +fi + +javaBin="${HADOOP_COMPAT_JAVA_BIN}" +javaCp="${HADOOP_COMPAT_JAVA_CLASSPATH}" +fsShell="org.apache.hadoop.fs.FsShell" + +$javaBin -cp "${javaCp}" "${fsShell}" "$@" diff --git a/hadoop-compat-bench/src/test/resources/hadoop-compat-bench-test-shell-hdfs.sh b/hadoop-compat-bench/src/test/resources/hadoop-compat-bench-test-shell-hdfs.sh new file mode 100644 index 0000000000000..f24d32fed17ef --- /dev/null +++ b/hadoop-compat-bench/src/test/resources/hadoop-compat-bench-test-shell-hdfs.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmd="${1}" +shift + +if [ X"${cmd}" != X"dfs" ] && [ X"${cmd}" != X"storagepolicies" ]; then + exit 1 +fi + +javaBin="${HADOOP_COMPAT_JAVA_BIN}" +javaCp="${HADOOP_COMPAT_JAVA_CLASSPATH}" +if [ X"${cmd}" = X"dfs" ]; then + fsShell="org.apache.hadoop.fs.FsShell" +else + fsShell="org.apache.hadoop.hdfs.tools.StoragePolicyAdmin" +fi + +$javaBin -cp "${javaCp}" "${fsShell}" "$@" diff --git a/hadoop-compat-bench/src/test/resources/test-case-simple.t b/hadoop-compat-bench/src/test/resources/test-case-simple.t new file mode 100644 index 0000000000000..f5aaba68bbed3 --- /dev/null +++ b/hadoop-compat-bench/src/test/resources/test-case-simple.t @@ -0,0 +1,10 @@ +#!/bin/sh +. $(dirname "$0")/../misc.sh + +echo "Hello World!" > "${localDir}/dat" + +echo "1..3" + +expect_ret "mkdir (ut)" 0 hadoop fs -mkdir -p "${baseDir}/dir" +expect_ret "put (ut)" 0 hadoop fs -put "${localDir}/dat" "${baseDir}/dir/" +expect_ret "rm (ut)" 0 hadoop fs -rm -r -skipTrash "${baseDir}/dir" diff --git a/hadoop-compat-bench/src/test/resources/test-case-skip.t b/hadoop-compat-bench/src/test/resources/test-case-skip.t new file mode 100644 index 0000000000000..462ee525b93f8 --- /dev/null +++ b/hadoop-compat-bench/src/test/resources/test-case-skip.t @@ -0,0 +1,8 @@ +#!/bin/sh +. 
$(dirname "$0")/../misc.sh + +echo "1..3" + +expect_ret "mkdir (ut)" 0 hadoop fs -mkdir -p "${baseDir}/dir" +expect_ret "nonExistCommand (ut)" 0 hadoop fs -nonExistCommand "${baseDir}/dir" +expect_ret "rm (ut)" 0 hadoop fs -rm -r -skipTrash "${baseDir}/dir" From b1a0e3f04fccc70db560e8d6f5c601bdadcd487f Mon Sep 17 00:00:00 2001 From: "chonghu.lh" Date: Tue, 27 Feb 2024 21:00:41 +0800 Subject: [PATCH 2/4] fix pr failed checks listed in https://github.com/apache/hadoop/pull/6535#issuecomment-1933237018 --- hadoop-compat-bench/HdfsCompatBenchIssue.md | 2 +- hadoop-compat-bench/shell/cases/attr.t | 16 ++++++ hadoop-compat-bench/shell/cases/concat.t | 16 ++++++ hadoop-compat-bench/shell/cases/copy.t | 16 ++++++ hadoop-compat-bench/shell/cases/fileinfo.t | 16 ++++++ .../shell/cases/modification.t | 16 ++++++ hadoop-compat-bench/shell/cases/move.t | 16 ++++++ hadoop-compat-bench/shell/cases/read.t | 16 ++++++ hadoop-compat-bench/shell/cases/remove.t | 16 ++++++ hadoop-compat-bench/shell/cases/snapshot.t | 16 ++++++ .../shell/cases/storagePolicy.t | 16 ++++++ hadoop-compat-bench/shell/misc.sh | 39 +++++++++++--- .../hadoop/compat/AbstractHdfsCompatCase.java | 3 +- .../hadoop/compat/HdfsCompatEnvironment.java | 10 ++-- .../HdfsCompatIllegalArgumentException.java | 17 ++++++ .../hadoop/compat/HdfsCompatShellScope.java | 26 +++++++-- .../apache/hadoop/compat/HdfsCompatSuite.java | 17 ++++++ .../apache/hadoop/compat/HdfsCompatUtil.java | 4 +- .../hadoop/compat/HdfsCompatibility.java | 6 ++- .../cases/function/HdfsCompatCreate.java | 3 +- .../compat/cases/function/HdfsCompatFile.java | 7 +-- .../cases/function/HdfsCompatLocal.java | 3 +- .../function/HdfsCompatStoragePolicy.java | 3 +- .../cases/function/HdfsCompatTpcds.java | 3 +- .../cases/function/HdfsCompatXAttr.java | 25 +++++---- .../implement/HdfsCompatFileSystemImpl.java | 4 +- .../compat/suites/HdfsCompatSuiteForAll.java | 54 +++++++++---------- .../suites/HdfsCompatSuiteForShell.java | 26 +++++---- .../suites/HdfsCompatSuiteForTpcds.java | 8 ++- .../compat/hdfs/HdfsCompatTestCommand.java | 17 ++++++ .../compat/hdfs/HdfsCompatTestShellScope.java | 17 ++++++ .../src/test/resources/test-case-simple.t | 16 ++++++ .../src/test/resources/test-case-skip.t | 16 ++++++ 33 files changed, 400 insertions(+), 86 deletions(-) diff --git a/hadoop-compat-bench/HdfsCompatBenchIssue.md b/hadoop-compat-bench/HdfsCompatBenchIssue.md index 77c3e2b246f41..cb9e2a96cd50e 100644 --- a/hadoop-compat-bench/HdfsCompatBenchIssue.md +++ b/hadoop-compat-bench/HdfsCompatBenchIssue.md @@ -44,7 +44,7 @@ Accordingly, we propose to define a formal HCFS compatibility benchmark and prov to do the compatibility assessment for an HCFS storage system. The benchmark and tool should consider both HCFS interfaces and hdfs shell commands. Different scenarios require different kinds of compatibilities. -For such consideration, we could define different suites in the benchmark. +For such consideration, we could define different suites in the benchmark. ## Benefits diff --git a/hadoop-compat-bench/shell/cases/attr.t b/hadoop-compat-bench/shell/cases/attr.t index 00f87cffdb92f..c140c51106777 100644 --- a/hadoop-compat-bench/shell/cases/attr.t +++ b/hadoop-compat-bench/shell/cases/attr.t @@ -1,4 +1,20 @@ #!/bin/sh + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + . $(dirname "$0")/../misc.sh echo "Hello World!" > "${localDir}/dat" diff --git a/hadoop-compat-bench/shell/cases/concat.t b/hadoop-compat-bench/shell/cases/concat.t index 22f7cd5c543d7..967880423dd18 100644 --- a/hadoop-compat-bench/shell/cases/concat.t +++ b/hadoop-compat-bench/shell/cases/concat.t @@ -1,4 +1,20 @@ #!/bin/sh + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + . $(dirname "$0")/../misc.sh echo "Hello World!" > "${localDir}/dat" diff --git a/hadoop-compat-bench/shell/cases/copy.t b/hadoop-compat-bench/shell/cases/copy.t index 76d246b30b920..b4a12fcaed224 100644 --- a/hadoop-compat-bench/shell/cases/copy.t +++ b/hadoop-compat-bench/shell/cases/copy.t @@ -1,4 +1,20 @@ #!/bin/sh + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + . $(dirname "$0")/../misc.sh echo "Hello World!" > "${localDir}/dat" diff --git a/hadoop-compat-bench/shell/cases/fileinfo.t b/hadoop-compat-bench/shell/cases/fileinfo.t index ad95d9b194146..5acb8bc121ecc 100644 --- a/hadoop-compat-bench/shell/cases/fileinfo.t +++ b/hadoop-compat-bench/shell/cases/fileinfo.t @@ -1,4 +1,20 @@ #!/bin/sh + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + . $(dirname "$0")/../misc.sh echo "Hello World!" > "${localDir}/dat" diff --git a/hadoop-compat-bench/shell/cases/modification.t b/hadoop-compat-bench/shell/cases/modification.t index 808fb00823b99..a0eef06b4d524 100644 --- a/hadoop-compat-bench/shell/cases/modification.t +++ b/hadoop-compat-bench/shell/cases/modification.t @@ -1,4 +1,20 @@ #!/bin/sh + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + . $(dirname "$0")/../misc.sh echo "Hello World!" > "${localDir}/dat" diff --git a/hadoop-compat-bench/shell/cases/move.t b/hadoop-compat-bench/shell/cases/move.t index 3dc4029f9c235..8e34e010116f0 100644 --- a/hadoop-compat-bench/shell/cases/move.t +++ b/hadoop-compat-bench/shell/cases/move.t @@ -1,4 +1,20 @@ #!/bin/sh + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + . $(dirname "$0")/../misc.sh echo "Hello World!" > "${localDir}/dat" diff --git a/hadoop-compat-bench/shell/cases/read.t b/hadoop-compat-bench/shell/cases/read.t index cbe9124f94137..0de56a4593bfd 100644 --- a/hadoop-compat-bench/shell/cases/read.t +++ b/hadoop-compat-bench/shell/cases/read.t @@ -1,4 +1,20 @@ #!/bin/sh + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + . $(dirname "$0")/../misc.sh echo "Hello World!" > "${localDir}/dat" diff --git a/hadoop-compat-bench/shell/cases/remove.t b/hadoop-compat-bench/shell/cases/remove.t index 11699b392ea78..584fa553c454b 100644 --- a/hadoop-compat-bench/shell/cases/remove.t +++ b/hadoop-compat-bench/shell/cases/remove.t @@ -1,4 +1,20 @@ #!/bin/sh + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + . $(dirname "$0")/../misc.sh echo "Hello World!" > "${localDir}/dat" diff --git a/hadoop-compat-bench/shell/cases/snapshot.t b/hadoop-compat-bench/shell/cases/snapshot.t index 92a876cabfc26..07ab3cc264ab3 100644 --- a/hadoop-compat-bench/shell/cases/snapshot.t +++ b/hadoop-compat-bench/shell/cases/snapshot.t @@ -1,4 +1,20 @@ #!/bin/sh + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + . $(dirname "$0")/../misc.sh echo "1..3" diff --git a/hadoop-compat-bench/shell/cases/storagePolicy.t b/hadoop-compat-bench/shell/cases/storagePolicy.t index 16b00f90d19c5..49ed2d8feba8c 100644 --- a/hadoop-compat-bench/shell/cases/storagePolicy.t +++ b/hadoop-compat-bench/shell/cases/storagePolicy.t @@ -1,4 +1,20 @@ #!/bin/sh + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + . $(dirname "$0")/../misc.sh echo "Hello World!" > "${localDir}/dat" diff --git a/hadoop-compat-bench/shell/misc.sh b/hadoop-compat-bench/shell/misc.sh index 8c2a27dd199e4..71040cee14b22 100644 --- a/hadoop-compat-bench/shell/misc.sh +++ b/hadoop-compat-bench/shell/misc.sh @@ -1,5 +1,20 @@ #!/bin/sh +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + ntest=1 fname="$0" @@ -13,10 +28,10 @@ prepare() { FAIL_FILE="${HADOOP_COMPAT_FAIL_FILE}" SKIP_FILE="${HADOOP_COMPAT_SKIP_FILE}" - baseDir="${BASE_URI}/${fname}" - localDir="${LOCAL_URI}/${fname}" - snapshotDir="${SNAPSHOT_URI}" - storagePolicy="${STORAGE_POLICY}" + export baseDir="${BASE_URI}/${fname}" + export localDir="${LOCAL_URI}/${fname}" + export snapshotDir="${SNAPSHOT_URI}" + export storagePolicy="${STORAGE_POLICY}" stdoutDir="${STDOUT_DIR}/${fname}/stdout" stderrDir="${STDOUT_DIR}/${fname}/stderr" mkdir -p "${stdoutDir}" @@ -87,7 +102,6 @@ expect_lines() { ( if should_skip "${stderr}"; then skip_case "${cname}" else - set -- ${lines} lineCount="0" while read -r line; do case "${line}" in @@ -95,7 +109,8 @@ expect_lines() { ( continue ;; esac - if ! echo "${line}" | grep -Eq '^'"${1}"'$'; then + selectedLine=$(expect_lines_select "${lines}" "${lineCount}") + if ! 
echo "${line}" | grep -Eq '^'"${selectedLine}"'$'; then lineCount="-1" break else @@ -120,6 +135,18 @@ expect_lines_parse() { done } +expect_lines_select() { + lineSelector="0" + echo "${1}" | while read -r splittedLine; do + if [ "${lineSelector}" -eq "${2}" ]; then + echo "${splittedLine}" + return + fi + lineSelector=$((lineSelector + 1)) + done + echo "" +} + is_hadoop_shell() { if [ X"${1}" = X"hadoop" ] || [ X"${1}" = X"hdfs" ]; then return 0 diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/AbstractHdfsCompatCase.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/AbstractHdfsCompatCase.java index eced86144d6aa..b59fe8d6a4e03 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/AbstractHdfsCompatCase.java +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/AbstractHdfsCompatCase.java @@ -28,6 +28,7 @@ public abstract class AbstractHdfsCompatCase { private FileSystem fs; private HdfsCompatEnvironment env; private Path localPath; + private static final Random random = new Random(); public AbstractHdfsCompatCase() { } @@ -57,7 +58,7 @@ public Path getUniquePath() { public static Path getUniquePath(Path basePath) { return new Path(basePath, System.currentTimeMillis() - + "_" + new Random().nextLong()); + + "_" + random.nextLong()); } public Path makePath(String name) { diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatEnvironment.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatEnvironment.java index 3251bac5868e7..c729a6d150f39 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatEnvironment.java +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatEnvironment.java @@ -39,10 +39,10 @@ import java.util.stream.Collectors; public class HdfsCompatEnvironment { - private static final SimpleDateFormat dateFormat = - new SimpleDateFormat("yyyy_MM_dd_HH_mm_ss"); private static final Logger LOG = LoggerFactory.getLogger(HdfsCompatEnvironment.class); + private static final String dateFormat = "yyyy_MM_dd_HH_mm_ss"; + private static final Random random = new Random(); private final Path uri; private final Configuration conf; private FileSystem fs; @@ -61,7 +61,7 @@ public void init() throws IOException { Date now = new Date(); String uuid = UUID.randomUUID().toString(); String uniqueDir = "hadoop-compatibility-benchmark/" + - dateFormat.format(now) + "/" + uuid; + new SimpleDateFormat(dateFormat).format(now) + "/" + uuid; this.fs = uri.getFileSystem(conf); this.localFs = FileSystem.getLocal(conf); @@ -113,7 +113,7 @@ public String[] getStoragePolicyNames() { final String key = "fs." + scheme + ".compatibility.storage.policies"; final String storagePolicies = conf.get(key, null); return (storagePolicies != null) ? 
storagePolicies.split(",") : - defaultStoragePolicyNames; + defaultStoragePolicyNames.clone(); } public String getDelegationTokenRenewer() { @@ -135,7 +135,7 @@ private String getEnvTmpDir() { return null; } final String tmpDir = validDirs.get( - new Random().nextInt(validDirs.size())); + random.nextInt(validDirs.size())); return new File(tmpDir).getAbsolutePath(); } diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatIllegalArgumentException.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatIllegalArgumentException.java index e4d18ef954347..ab49203b2ec7a 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatIllegalArgumentException.java +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatIllegalArgumentException.java @@ -1,3 +1,20 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
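
[Editor's note on the HdfsCompatEnvironment hunk above: the shared static SimpleDateFormat is replaced by a pattern string formatted per call because SimpleDateFormat is not thread-safe. An immutable alternative, shown only as a sketch (java.time formatters are safe to share across threads):

    import java.time.LocalDateTime;
    import java.time.format.DateTimeFormatter;

    // DateTimeFormatter is immutable, so a shared static instance is safe.
    private static final DateTimeFormatter FORMAT =
        DateTimeFormatter.ofPattern("yyyy_MM_dd_HH_mm_ss");

    String stamp = LocalDateTime.now().format(FORMAT);
]
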
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.compat; public class HdfsCompatIllegalArgumentException diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatShellScope.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatShellScope.java index 447fee97008b0..08e2325ed4b35 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatShellScope.java +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatShellScope.java @@ -1,3 +1,20 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.compat; @@ -11,8 +28,8 @@ import java.io.BufferedReader; import java.io.File; +import java.io.FileInputStream; import java.io.FileOutputStream; -import java.io.FileReader; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; @@ -33,6 +50,7 @@ public class HdfsCompatShellScope { private static final Logger LOG = LoggerFactory.getLogger(HdfsCompatShellScope.class); + private static final Random random = new Random(); private final HdfsCompatEnvironment env; private final HdfsCompatSuite suite; private File stdoutDir = null; @@ -225,7 +243,7 @@ private String getStoragePolicy() { return defPolicyName; } else { return differentPolicies.get( - new Random().nextInt(differentPolicies.size())); + random.nextInt(differentPolicies.size())); } } @@ -353,8 +371,8 @@ private HdfsCompatReport export() throws IOException { private List readLines(File file) throws IOException { List lines = new ArrayList<>(); - try (BufferedReader br = new BufferedReader( - new FileReader(file))) { + try (BufferedReader br = new BufferedReader(new InputStreamReader( + new FileInputStream(file), StandardCharsets.UTF_8))) { String line = br.readLine(); while (line != null) { lines.add(line); diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatSuite.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatSuite.java index 03147f51abd9e..3ee446fcd777b 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatSuite.java +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatSuite.java @@ -1,3 +1,20 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
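
[Editor's note on the HdfsCompatShellScope hunk above: FileReader is swapped for an InputStreamReader with an explicit charset because FileReader reads with the platform-default encoding. The fixed read path, isolated as a self-contained sketch:

    import java.io.BufferedReader;
    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;
    import java.util.ArrayList;
    import java.util.List;

    // Read all lines as UTF-8, regardless of the platform default charset.
    static List<String> readLines(File file) throws IOException {
      List<String> lines = new ArrayList<>();
      try (BufferedReader br = new BufferedReader(new InputStreamReader(
          new FileInputStream(file), StandardCharsets.UTF_8))) {
        for (String line = br.readLine(); line != null; line = br.readLine()) {
          lines.add(line);
        }
      }
      return lines;
    }
]
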
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.compat; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatUtil.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatUtil.java index a3cd0ec84fcc4..337efe2e7c02c 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatUtil.java +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatUtil.java @@ -33,6 +33,7 @@ public class HdfsCompatUtil { private static final Logger LOG = LoggerFactory.getLogger(HdfsCompatUtil.class); + private static final Random random = new Random(); public static void checkImplementation(ImplementationFunction func) { try { @@ -67,10 +68,9 @@ public static void createFile(FileSystem fs, Path file, boolean overwrite, bufferSize, replication, blockSize)) { if (fileLen > 0) { byte[] toWrite = new byte[bufferSize]; - Random rb = new Random(); long bytesToWrite = fileLen; while (bytesToWrite > 0) { - rb.nextBytes(toWrite); + random.nextBytes(toWrite); int bytesToWriteNext = (bufferSize < bytesToWrite) ? bufferSize : (int) bytesToWrite; out.write(toWrite, 0, bytesToWriteNext); diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatibility.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatibility.java index 5e4942566ec5f..d4e32cb6999f3 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatibility.java +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatibility.java @@ -24,6 +24,7 @@ import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.PrintStream; +import java.nio.charset.StandardCharsets; import java.security.PrivilegedExceptionAction; import java.util.Collection; @@ -205,8 +206,9 @@ void printReport(HdfsCompatReport report, OutputStream out) printOut(shortMessage); if (out != null) { - out.write(shortMessage.getBytes()); - BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out)); + out.write(shortMessage.getBytes(StandardCharsets.UTF_8)); + BufferedWriter writer = new BufferedWriter( + new OutputStreamWriter(out, StandardCharsets.UTF_8)); writer.newLine(); writer.write("PASSED CASES:"); writer.newLine(); diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatCreate.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatCreate.java index 26863a8d5fe12..cd4642d5af0ac 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatCreate.java +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatCreate.java @@ -23,6 +23,7 @@ import org.junit.Assert; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.concurrent.CompletableFuture; @HdfsCompatCaseGroup(name = "Create") @@ -97,7 +98,7 @@ public void createFile() throws IOException { try { FSDataOutputStreamBuilder builder = fs().createFile(file); out = builder.blockSize(1048576 * 2).build(); - out.write("Hello World!".getBytes()); + out.write("Hello World!".getBytes(StandardCharsets.UTF_8)); out.close(); out = null; Assert.assertTrue(fs().exists(file)); diff 
--git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatFile.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatFile.java index b4e4d3814533a..f81c1043a5a30 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatFile.java +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatFile.java @@ -35,6 +35,7 @@ public class HdfsCompatFile extends AbstractHdfsCompatCase { private static final int fileLen = 128; private static final long blockSize = 1048576; private static final short replication = 1; + private static final Random random = new Random(); private Path file = null; @HdfsCompatCasePrepare @@ -109,7 +110,7 @@ public void cancelDeleteOnExit() throws IOException { @HdfsCompatCase public void truncate() throws IOException, InterruptedException { - int newLen = new Random().nextInt(fileLen); + int newLen = random.nextInt(fileLen); boolean finished = fs().truncate(file, newLen); while (!finished) { Thread.sleep(1000); @@ -121,8 +122,8 @@ public void truncate() throws IOException, InterruptedException { @HdfsCompatCase public void setOwner() throws Exception { - final String owner = "test_" + new Random().nextInt(1024); - final String group = "test_" + new Random().nextInt(1024); + final String owner = "test_" + random.nextInt(1024); + final String group = "test_" + random.nextInt(1024); final String privileged = getPrivilegedUser(); UserGroupInformation.createRemoteUser(privileged).doAs( (PrivilegedExceptionAction) () -> { diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatLocal.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatLocal.java index 8ff57e2a30a23..d3db5dbd59adf 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatLocal.java +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatLocal.java @@ -30,6 +30,7 @@ @HdfsCompatCaseGroup(name = "Local") public class HdfsCompatLocal extends AbstractHdfsCompatCase { private static final int fileLen = 128; + private static final Random random = new Random(); private LocalFileSystem localFs; private Path localBasePath; private Path localSrc; @@ -51,7 +52,7 @@ public void tearDown() { @HdfsCompatCasePrepare public void prepare() throws IOException { final String unique = System.currentTimeMillis() - + "_" + new Random().nextLong() + "/"; + + "_" + random.nextLong() + "/"; this.localSrc = new Path(localBasePath, unique + "src"); this.localDst = new Path(localBasePath, unique + "dst"); this.src = new Path(getBasePath(), unique + "src"); diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatStoragePolicy.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatStoragePolicy.java index 7f150bef24efe..fd5efbe934840 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatStoragePolicy.java +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatStoragePolicy.java @@ -34,6 +34,7 @@ public class HdfsCompatStoragePolicy extends AbstractHdfsCompatCase { private static final Logger LOG = LoggerFactory.getLogger(HdfsCompatStoragePolicy.class); + private static final Random random = new Random(); private Path dir; private Path file; private String[] policies; @@ -66,7 +67,7 @@ public void 
prepare() throws IOException { this.policyName = defaultPolicyName; } else { this.policyName = differentPolicies.get( - new Random().nextInt(differentPolicies.size())); + random.nextInt(differentPolicies.size())); } } diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatTpcds.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatTpcds.java index fe1e03a17493f..9c9f295f26852 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatTpcds.java +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatTpcds.java @@ -29,6 +29,7 @@ @HdfsCompatCaseGroup(name = "TPCDS") public class HdfsCompatTpcds extends AbstractHdfsCompatCase { private static final int fileLen = 8; + private static final Random random = new Random(); private Path path = null; @HdfsCompatCasePrepare @@ -53,7 +54,7 @@ public void open() throws IOException { @HdfsCompatCase public void create() throws IOException { byte[] data = new byte[fileLen]; - new Random().nextBytes(data); + random.nextBytes(data); try (FSDataOutputStream out = fs().create(path, true)) { out.write(data); } diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatXAttr.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatXAttr.java index 05fb9f712c472..5e21a8b47a15c 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatXAttr.java +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatXAttr.java @@ -23,6 +23,7 @@ import org.junit.Assert; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -45,7 +46,7 @@ public void cleanup() { @HdfsCompatCase public void setXAttr() throws IOException { final String key = "user.key"; - final byte[] value = "value".getBytes(); + final byte[] value = "value".getBytes(StandardCharsets.UTF_8); fs().setXAttr(file, key, value); Map attrs = fs().getXAttrs(file); Assert.assertArrayEquals(value, attrs.getOrDefault(key, new byte[0])); @@ -54,7 +55,7 @@ public void setXAttr() throws IOException { @HdfsCompatCase public void getXAttr() throws IOException { final String key = "user.key"; - final byte[] value = "value".getBytes(); + final byte[] value = "value".getBytes(StandardCharsets.UTF_8); fs().setXAttr(file, key, value); byte[] attr = fs().getXAttr(file, key); Assert.assertArrayEquals(value, attr); @@ -62,20 +63,24 @@ public void getXAttr() throws IOException { @HdfsCompatCase public void getXAttrs() throws IOException { - fs().setXAttr(file, "user.key1", "value1".getBytes()); - fs().setXAttr(file, "user.key2", "value2".getBytes()); + fs().setXAttr(file, "user.key1", + "value1".getBytes(StandardCharsets.UTF_8)); + fs().setXAttr(file, "user.key2", + "value2".getBytes(StandardCharsets.UTF_8)); List keys = new ArrayList<>(); keys.add("user.key1"); Map attrs = fs().getXAttrs(file, keys); Assert.assertEquals(1, attrs.size()); byte[] attr = attrs.getOrDefault("user.key1", new byte[0]); - Assert.assertArrayEquals("value1".getBytes(), attr); + Assert.assertArrayEquals("value1".getBytes(StandardCharsets.UTF_8), attr); } @HdfsCompatCase public void listXAttrs() throws IOException { - fs().setXAttr(file, "user.key1", "value1".getBytes()); - fs().setXAttr(file, "user.key2", "value2".getBytes()); + fs().setXAttr(file, "user.key1", + 
"value1".getBytes(StandardCharsets.UTF_8)); + fs().setXAttr(file, "user.key2", + "value2".getBytes(StandardCharsets.UTF_8)); List names = fs().listXAttrs(file); Assert.assertEquals(2, names.size()); Assert.assertTrue(names.contains("user.key1")); @@ -84,8 +89,10 @@ public void listXAttrs() throws IOException { @HdfsCompatCase public void removeXAttr() throws IOException { - fs().setXAttr(file, "user.key1", "value1".getBytes()); - fs().setXAttr(file, "user.key2", "value2".getBytes()); + fs().setXAttr(file, "user.key1", + "value1".getBytes(StandardCharsets.UTF_8)); + fs().setXAttr(file, "user.key2", + "value2".getBytes(StandardCharsets.UTF_8)); fs().removeXAttr(file, "user.key1"); List names = fs().listXAttrs(file); Assert.assertEquals(1, names.size()); diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/implement/HdfsCompatFileSystemImpl.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/implement/HdfsCompatFileSystemImpl.java index 8245c3e7fd8d0..300b4756d60ac 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/implement/HdfsCompatFileSystemImpl.java +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/implement/HdfsCompatFileSystemImpl.java @@ -34,6 +34,7 @@ import java.io.IOException; import java.net.URI; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; @@ -608,7 +609,8 @@ public void getAclStatus() { @HdfsCompatCase public void setXAttr() { HdfsCompatUtil.checkImplementation(() -> - fs().setXAttr(makePath("file"), "test-xattr", "test-value".getBytes()) + fs().setXAttr(makePath("file"), "test-xattr", + "test-value".getBytes(StandardCharsets.UTF_8)) ); } diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForAll.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForAll.java index a5d1923276da9..8d384a0c79696 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForAll.java +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForAll.java @@ -24,33 +24,6 @@ import org.apache.hadoop.compat.cases.function.*; public class HdfsCompatSuiteForAll implements HdfsCompatSuite { - private static final Class[] API_CASES = new Class[]{ - HdfsCompatFileSystemImpl.class, - HdfsCompatAcl.class, - HdfsCompatCreate.class, - HdfsCompatDirectory.class, - HdfsCompatFile.class, - HdfsCompatLocal.class, - HdfsCompatServer.class, - HdfsCompatSnapshot.class, - HdfsCompatStoragePolicy.class, - HdfsCompatSymlink.class, - HdfsCompatXAttr.class, - }; - - private static final String[] SHELL_CASES = new String[]{ - "modification.t", - "fileinfo.t", - "read.t", - "remove.t", - "attr.t", - "copy.t", - "move.t", - "concat.t", - "snapshot.t", - "storagePolicy.t", - }; - @Override public String getSuiteName() { return "ALL"; @@ -58,11 +31,34 @@ public String getSuiteName() { @Override public Class[] getApiCases() { - return API_CASES; + return new Class[]{ + HdfsCompatFileSystemImpl.class, + HdfsCompatAcl.class, + HdfsCompatCreate.class, + HdfsCompatDirectory.class, + HdfsCompatFile.class, + HdfsCompatLocal.class, + HdfsCompatServer.class, + HdfsCompatSnapshot.class, + HdfsCompatStoragePolicy.class, + HdfsCompatSymlink.class, + HdfsCompatXAttr.class, + }; } @Override public String[] getShellCases() { - return SHELL_CASES; + return new String[]{ + "modification.t", + "fileinfo.t", + "read.t", + "remove.t", + "attr.t", + "copy.t", + "move.t", 
+ "concat.t", + "snapshot.t", + "storagePolicy.t", + }; } } \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForShell.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForShell.java index dcf57555dcc77..b440efb24f7ac 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForShell.java +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForShell.java @@ -22,19 +22,6 @@ import org.apache.hadoop.compat.HdfsCompatSuite; public class HdfsCompatSuiteForShell implements HdfsCompatSuite { - private static final String[] SHELL_CASES = new String[]{ - "modification.t", - "fileinfo.t", - "read.t", - "remove.t", - "attr.t", - "copy.t", - "move.t", - "concat.t", - "snapshot.t", - "storagePolicy.t", - }; - @Override public String getSuiteName() { return "Shell"; @@ -47,6 +34,17 @@ public Class[] getApiCases() { @Override public String[] getShellCases() { - return SHELL_CASES; + return new String[]{ + "modification.t", + "fileinfo.t", + "read.t", + "remove.t", + "attr.t", + "copy.t", + "move.t", + "concat.t", + "snapshot.t", + "storagePolicy.t", + }; } } \ No newline at end of file diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForTpcds.java b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForTpcds.java index c68e361804498..9d69427697ede 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForTpcds.java +++ b/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForTpcds.java @@ -23,10 +23,6 @@ import org.apache.hadoop.compat.cases.function.*; public class HdfsCompatSuiteForTpcds implements HdfsCompatSuite { - private static final Class[] API_CASES = new Class[]{ - HdfsCompatTpcds.class - }; - @Override public String getSuiteName() { return "TPCDS"; @@ -34,7 +30,9 @@ public String getSuiteName() { @Override public Class[] getApiCases() { - return API_CASES; + return new Class[]{ + HdfsCompatTpcds.class + }; } @Override diff --git a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestCommand.java b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestCommand.java index 8d17890490637..e998ee8f18d25 100644 --- a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestCommand.java +++ b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestCommand.java @@ -1,3 +1,20 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
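[Note on the suite refactor above: the three suite classes drop their static API_CASES/SHELL_CASES fields and allocate the arrays inside the accessors, so no caller can mutate a shared static array seen by other callers. A hedged sketch of the resulting shape, with the case lists abbreviated and the raw Class[] return type taken as shown in the hunks:

import org.apache.hadoop.compat.HdfsCompatSuite;
import org.apache.hadoop.compat.cases.function.HdfsCompatXAttr;

public class HdfsCompatSuiteSketch implements HdfsCompatSuite {
  @Override
  public String getSuiteName() {
    return "Sketch";
  }

  @Override
  public Class[] getApiCases() {
    // A fresh array on each call; callers get a private snapshot.
    return new Class[]{
        HdfsCompatXAttr.class,
    };
  }

  @Override
  public String[] getShellCases() {
    return new String[]{
        "read.t",
        "remove.t",
    };
  }
}
]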
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.compat.hdfs; diff --git a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestShellScope.java b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestShellScope.java index 17d3c546945a2..62e95eace0fb8 100644 --- a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestShellScope.java +++ b/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestShellScope.java @@ -1,3 +1,20 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.compat.hdfs; import org.apache.hadoop.classification.VisibleForTesting; diff --git a/hadoop-compat-bench/src/test/resources/test-case-simple.t b/hadoop-compat-bench/src/test/resources/test-case-simple.t index f5aaba68bbed3..62b37a2c87780 100644 --- a/hadoop-compat-bench/src/test/resources/test-case-simple.t +++ b/hadoop-compat-bench/src/test/resources/test-case-simple.t @@ -1,4 +1,20 @@ #!/bin/sh + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + . $(dirname "$0")/../misc.sh echo "Hello World!" > "${localDir}/dat" diff --git a/hadoop-compat-bench/src/test/resources/test-case-skip.t b/hadoop-compat-bench/src/test/resources/test-case-skip.t index 462ee525b93f8..ad0f85aea5567 100644 --- a/hadoop-compat-bench/src/test/resources/test-case-skip.t +++ b/hadoop-compat-bench/src/test/resources/test-case-skip.t @@ -1,4 +1,20 @@ #!/bin/sh + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + . 
$(dirname "$0")/../misc.sh echo "1..3" From 91a14fc494ca32be55ff297d9355edba1289d74e Mon Sep 17 00:00:00 2001 From: "chonghu.lh" Date: Wed, 28 Feb 2024 20:37:51 +0800 Subject: [PATCH 3/4] modify package name and move hadoop-compat-bench from hadoop/ to hadoop/hadoop-tools/ --- .../hadoop-compat-bench}/HdfsCompatBenchIssue.md | 0 .../hadoop-compat-bench}/pom.xml | 8 ++++---- .../hadoop-compat-bench}/shell/cases/attr.t | 0 .../hadoop-compat-bench}/shell/cases/concat.t | 0 .../hadoop-compat-bench}/shell/cases/copy.t | 0 .../hadoop-compat-bench}/shell/cases/fileinfo.t | 0 .../hadoop-compat-bench}/shell/cases/modification.t | 0 .../hadoop-compat-bench}/shell/cases/move.t | 0 .../hadoop-compat-bench}/shell/cases/read.t | 0 .../hadoop-compat-bench}/shell/cases/remove.t | 0 .../hadoop-compat-bench}/shell/cases/snapshot.t | 0 .../hadoop-compat-bench}/shell/cases/storagePolicy.t | 0 .../hadoop-compat-bench}/shell/misc.sh | 0 .../hadoop/fs}/compat/AbstractHdfsCompatCase.java | 2 +- .../apache/hadoop/fs}/compat/HdfsCompatApiScope.java | 2 +- .../org/apache/hadoop/fs}/compat/HdfsCompatCase.java | 2 +- .../hadoop/fs}/compat/HdfsCompatCaseCleanup.java | 2 +- .../apache/hadoop/fs}/compat/HdfsCompatCaseGroup.java | 2 +- .../hadoop/fs}/compat/HdfsCompatCasePrepare.java | 2 +- .../apache/hadoop/fs}/compat/HdfsCompatCaseSetUp.java | 2 +- .../hadoop/fs}/compat/HdfsCompatCaseTearDown.java | 2 +- .../apache/hadoop/fs}/compat/HdfsCompatCommand.java | 8 ++++---- .../hadoop/fs}/compat/HdfsCompatEnvironment.java | 2 +- .../compat/HdfsCompatIllegalArgumentException.java | 2 +- .../fs}/compat/HdfsCompatIllegalCaseException.java | 2 +- .../apache/hadoop/fs}/compat/HdfsCompatReport.java | 2 +- .../hadoop/fs}/compat/HdfsCompatShellScope.java | 2 +- .../org/apache/hadoop/fs}/compat/HdfsCompatSuite.java | 2 +- .../org/apache/hadoop/fs}/compat/HdfsCompatUtil.java | 2 +- .../apache/hadoop/fs}/compat/HdfsCompatibility.java | 2 +- .../fs}/compat/cases/function/HdfsCompatAcl.java | 4 ++-- .../fs}/compat/cases/function/HdfsCompatCreate.java | 4 ++-- .../compat/cases/function/HdfsCompatDirectory.java | 4 ++-- .../fs}/compat/cases/function/HdfsCompatFile.java | 4 ++-- .../fs}/compat/cases/function/HdfsCompatLocal.java | 5 ++--- .../fs}/compat/cases/function/HdfsCompatServer.java | 10 +++++----- .../fs}/compat/cases/function/HdfsCompatSnapshot.java | 5 ++--- .../cases/function/HdfsCompatStoragePolicy.java | 5 ++--- .../fs}/compat/cases/function/HdfsCompatSymlink.java | 4 ++-- .../fs}/compat/cases/function/HdfsCompatTpcds.java | 4 ++-- .../fs}/compat/cases/function/HdfsCompatXAttr.java | 5 ++--- .../cases/implement/HdfsCompatFileSystemImpl.java | 10 +++++----- .../fs}/compat/suites/HdfsCompatSuiteForAll.java | 11 +++++------ .../fs}/compat/suites/HdfsCompatSuiteForShell.java | 7 +++---- .../fs}/compat/suites/HdfsCompatSuiteForTpcds.java | 9 ++++----- .../resources/hadoop-compat-bench-log4j.properties | 0 .../fs}/compat/TestHdfsCompatDefaultSuites.java | 6 +++--- .../hadoop/fs}/compat/TestHdfsCompatFsCommand.java | 8 ++++---- .../fs}/compat/TestHdfsCompatInterfaceCoverage.java | 4 ++-- .../hadoop/fs}/compat/TestHdfsCompatShellCommand.java | 8 ++++---- .../fs}/compat/cases/HdfsCompatAclTestCases.java | 8 ++++---- .../fs}/compat/cases/HdfsCompatMkdirTestCases.java | 8 ++++---- .../hadoop/fs}/compat/hdfs/HdfsCompatMiniCluster.java | 2 +- .../hadoop/fs}/compat/hdfs/HdfsCompatTestCommand.java | 7 +++++-- .../fs}/compat/hdfs/HdfsCompatTestShellScope.java | 8 ++++---- .../hadoop-compat-bench-test-shell-hadoop.sh | 0 
.../resources/hadoop-compat-bench-test-shell-hdfs.sh | 0 .../src/test/resources/test-case-simple.t | 0 .../src/test/resources/test-case-skip.t | 0 hadoop-tools/pom.xml | 1 + 60 files changed, 98 insertions(+), 101 deletions(-) rename {hadoop-compat-bench => hadoop-tools/hadoop-compat-bench}/HdfsCompatBenchIssue.md (100%) rename {hadoop-compat-bench => hadoop-tools/hadoop-compat-bench}/pom.xml (93%) rename {hadoop-compat-bench => hadoop-tools/hadoop-compat-bench}/shell/cases/attr.t (100%) rename {hadoop-compat-bench => hadoop-tools/hadoop-compat-bench}/shell/cases/concat.t (100%) rename {hadoop-compat-bench => hadoop-tools/hadoop-compat-bench}/shell/cases/copy.t (100%) rename {hadoop-compat-bench => hadoop-tools/hadoop-compat-bench}/shell/cases/fileinfo.t (100%) rename {hadoop-compat-bench => hadoop-tools/hadoop-compat-bench}/shell/cases/modification.t (100%) rename {hadoop-compat-bench => hadoop-tools/hadoop-compat-bench}/shell/cases/move.t (100%) rename {hadoop-compat-bench => hadoop-tools/hadoop-compat-bench}/shell/cases/read.t (100%) rename {hadoop-compat-bench => hadoop-tools/hadoop-compat-bench}/shell/cases/remove.t (100%) rename {hadoop-compat-bench => hadoop-tools/hadoop-compat-bench}/shell/cases/snapshot.t (100%) rename {hadoop-compat-bench => hadoop-tools/hadoop-compat-bench}/shell/cases/storagePolicy.t (100%) rename {hadoop-compat-bench => hadoop-tools/hadoop-compat-bench}/shell/misc.sh (100%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/AbstractHdfsCompatCase.java (98%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/HdfsCompatApiScope.java (99%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/HdfsCompatCase.java (96%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/HdfsCompatCaseCleanup.java (96%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/HdfsCompatCaseGroup.java (96%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/HdfsCompatCasePrepare.java (96%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/HdfsCompatCaseSetUp.java (96%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/HdfsCompatCaseTearDown.java (96%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/HdfsCompatCommand.java (95%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/HdfsCompatEnvironment.java (99%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/HdfsCompatIllegalArgumentException.java (96%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/HdfsCompatIllegalCaseException.java (96%) rename 
{hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/HdfsCompatReport.java (98%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/HdfsCompatShellScope.java (99%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/HdfsCompatSuite.java (96%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/HdfsCompatUtil.java (99%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/HdfsCompatibility.java (99%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/cases/function/HdfsCompatAcl.java (97%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/cases/function/HdfsCompatCreate.java (98%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/cases/function/HdfsCompatDirectory.java (98%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/cases/function/HdfsCompatFile.java (98%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/cases/function/HdfsCompatLocal.java (97%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/cases/function/HdfsCompatServer.java (96%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/cases/function/HdfsCompatSnapshot.java (98%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/cases/function/HdfsCompatStoragePolicy.java (97%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/cases/function/HdfsCompatSymlink.java (96%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/cases/function/HdfsCompatTpcds.java (97%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/cases/function/HdfsCompatXAttr.java (97%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/cases/implement/HdfsCompatFileSystemImpl.java (98%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/suites/HdfsCompatSuiteForAll.java (85%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/suites/HdfsCompatSuiteForShell.java (89%) rename {hadoop-compat-bench/src/main/java/org/apache/hadoop => 
hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs}/compat/suites/HdfsCompatSuiteForTpcds.java (84%) rename {hadoop-compat-bench => hadoop-tools/hadoop-compat-bench}/src/main/resources/hadoop-compat-bench-log4j.properties (100%) rename {hadoop-compat-bench/src/test/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs}/compat/TestHdfsCompatDefaultSuites.java (92%) rename {hadoop-compat-bench/src/test/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs}/compat/TestHdfsCompatFsCommand.java (95%) rename {hadoop-compat-bench/src/test/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs}/compat/TestHdfsCompatInterfaceCoverage.java (94%) rename {hadoop-compat-bench/src/test/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs}/compat/TestHdfsCompatShellCommand.java (94%) rename {hadoop-compat-bench/src/test/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs}/compat/cases/HdfsCompatAclTestCases.java (90%) rename {hadoop-compat-bench/src/test/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs}/compat/cases/HdfsCompatMkdirTestCases.java (83%) rename {hadoop-compat-bench/src/test/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs}/compat/hdfs/HdfsCompatMiniCluster.java (98%) rename {hadoop-compat-bench/src/test/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs}/compat/hdfs/HdfsCompatTestCommand.java (88%) rename {hadoop-compat-bench/src/test/java/org/apache/hadoop => hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs}/compat/hdfs/HdfsCompatTestShellScope.java (95%) rename {hadoop-compat-bench => hadoop-tools/hadoop-compat-bench}/src/test/resources/hadoop-compat-bench-test-shell-hadoop.sh (100%) rename {hadoop-compat-bench => hadoop-tools/hadoop-compat-bench}/src/test/resources/hadoop-compat-bench-test-shell-hdfs.sh (100%) rename {hadoop-compat-bench => hadoop-tools/hadoop-compat-bench}/src/test/resources/test-case-simple.t (100%) rename {hadoop-compat-bench => hadoop-tools/hadoop-compat-bench}/src/test/resources/test-case-skip.t (100%) diff --git a/hadoop-compat-bench/HdfsCompatBenchIssue.md b/hadoop-tools/hadoop-compat-bench/HdfsCompatBenchIssue.md similarity index 100% rename from hadoop-compat-bench/HdfsCompatBenchIssue.md rename to hadoop-tools/hadoop-compat-bench/HdfsCompatBenchIssue.md diff --git a/hadoop-compat-bench/pom.xml b/hadoop-tools/hadoop-compat-bench/pom.xml similarity index 93% rename from hadoop-compat-bench/pom.xml rename to hadoop-tools/hadoop-compat-bench/pom.xml index c904f3dfc4fa3..f9b082def4541 100644 --- a/hadoop-compat-bench/pom.xml +++ b/hadoop-tools/hadoop-compat-bench/pom.xml @@ -19,7 +19,7 @@ org.apache.hadoop hadoop-project 3.5.0-SNAPSHOT - ../hadoop-project + ../../hadoop-project hadoop-compat-bench 3.5.0-SNAPSHOT @@ -79,7 +79,7 @@ - org.apache.hadoop.compat.HdfsCompatibility + org.apache.hadoop.fs.compat.HdfsCompatibility @@ -91,7 +91,7 @@ - org.apache.hadoop.compat.hdfs.HdfsCompatMiniCluster + org.apache.hadoop.fs.compat.hdfs.HdfsCompatMiniCluster @@ -115,4 +115,4 @@ - + \ No newline at end of file diff --git a/hadoop-compat-bench/shell/cases/attr.t b/hadoop-tools/hadoop-compat-bench/shell/cases/attr.t similarity index 100% rename from hadoop-compat-bench/shell/cases/attr.t rename to 
hadoop-tools/hadoop-compat-bench/shell/cases/attr.t diff --git a/hadoop-compat-bench/shell/cases/concat.t b/hadoop-tools/hadoop-compat-bench/shell/cases/concat.t similarity index 100% rename from hadoop-compat-bench/shell/cases/concat.t rename to hadoop-tools/hadoop-compat-bench/shell/cases/concat.t diff --git a/hadoop-compat-bench/shell/cases/copy.t b/hadoop-tools/hadoop-compat-bench/shell/cases/copy.t similarity index 100% rename from hadoop-compat-bench/shell/cases/copy.t rename to hadoop-tools/hadoop-compat-bench/shell/cases/copy.t diff --git a/hadoop-compat-bench/shell/cases/fileinfo.t b/hadoop-tools/hadoop-compat-bench/shell/cases/fileinfo.t similarity index 100% rename from hadoop-compat-bench/shell/cases/fileinfo.t rename to hadoop-tools/hadoop-compat-bench/shell/cases/fileinfo.t diff --git a/hadoop-compat-bench/shell/cases/modification.t b/hadoop-tools/hadoop-compat-bench/shell/cases/modification.t similarity index 100% rename from hadoop-compat-bench/shell/cases/modification.t rename to hadoop-tools/hadoop-compat-bench/shell/cases/modification.t diff --git a/hadoop-compat-bench/shell/cases/move.t b/hadoop-tools/hadoop-compat-bench/shell/cases/move.t similarity index 100% rename from hadoop-compat-bench/shell/cases/move.t rename to hadoop-tools/hadoop-compat-bench/shell/cases/move.t diff --git a/hadoop-compat-bench/shell/cases/read.t b/hadoop-tools/hadoop-compat-bench/shell/cases/read.t similarity index 100% rename from hadoop-compat-bench/shell/cases/read.t rename to hadoop-tools/hadoop-compat-bench/shell/cases/read.t diff --git a/hadoop-compat-bench/shell/cases/remove.t b/hadoop-tools/hadoop-compat-bench/shell/cases/remove.t similarity index 100% rename from hadoop-compat-bench/shell/cases/remove.t rename to hadoop-tools/hadoop-compat-bench/shell/cases/remove.t diff --git a/hadoop-compat-bench/shell/cases/snapshot.t b/hadoop-tools/hadoop-compat-bench/shell/cases/snapshot.t similarity index 100% rename from hadoop-compat-bench/shell/cases/snapshot.t rename to hadoop-tools/hadoop-compat-bench/shell/cases/snapshot.t diff --git a/hadoop-compat-bench/shell/cases/storagePolicy.t b/hadoop-tools/hadoop-compat-bench/shell/cases/storagePolicy.t similarity index 100% rename from hadoop-compat-bench/shell/cases/storagePolicy.t rename to hadoop-tools/hadoop-compat-bench/shell/cases/storagePolicy.t diff --git a/hadoop-compat-bench/shell/misc.sh b/hadoop-tools/hadoop-compat-bench/shell/misc.sh similarity index 100% rename from hadoop-compat-bench/shell/misc.sh rename to hadoop-tools/hadoop-compat-bench/shell/misc.sh diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/AbstractHdfsCompatCase.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/AbstractHdfsCompatCase.java similarity index 98% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/AbstractHdfsCompatCase.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/AbstractHdfsCompatCase.java index b59fe8d6a4e03..0fa5e49f05efc 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/AbstractHdfsCompatCase.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/AbstractHdfsCompatCase.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.compat; +package org.apache.hadoop.fs.compat; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatApiScope.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatApiScope.java similarity index 99% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatApiScope.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatApiScope.java index 4e57e22854484..eb572bb629674 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatApiScope.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatApiScope.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat; +package org.apache.hadoop.fs.compat; import org.apache.hadoop.classification.VisibleForTesting; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCase.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatCase.java similarity index 96% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCase.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatCase.java index c62bf6295947d..f7d2b566f3abb 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCase.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatCase.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat; +package org.apache.hadoop.fs.compat; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseCleanup.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatCaseCleanup.java similarity index 96% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseCleanup.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatCaseCleanup.java index a6af788820a12..5ef6af21aac36 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseCleanup.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatCaseCleanup.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.compat; +package org.apache.hadoop.fs.compat; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseGroup.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatCaseGroup.java similarity index 96% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseGroup.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatCaseGroup.java index 53554b0890763..8f47538b52909 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseGroup.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatCaseGroup.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat; +package org.apache.hadoop.fs.compat; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCasePrepare.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatCasePrepare.java similarity index 96% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCasePrepare.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatCasePrepare.java index eff03f6414c2b..aa1cc770a8861 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCasePrepare.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatCasePrepare.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat; +package org.apache.hadoop.fs.compat; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseSetUp.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatCaseSetUp.java similarity index 96% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseSetUp.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatCaseSetUp.java index a80f8595fcb9c..10a7eff0bc71b 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseSetUp.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatCaseSetUp.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
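[Note on the HdfsCompatCase* files renamed here: they define the annotations the function cases are built from. A hedged sketch of how a case class wires them together, based only on usage visible in these patches; fs() and makePath() come from AbstractHdfsCompatCase, and the prepare/cleanup timing (presumably around each case in the group) is an assumption.

package org.apache.hadoop.fs.compat.cases.function;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.compat.*;
import org.junit.Assert;

import java.io.IOException;

@HdfsCompatCaseGroup(name = "Example")
public class HdfsCompatExampleSketch extends AbstractHdfsCompatCase {
  private Path file;

  @HdfsCompatCasePrepare
  public void prepare() throws IOException {
    // Presumably runs before each case in the group.
    file = makePath("example");
    fs().create(file, true).close();
  }

  @HdfsCompatCase
  public void exists() throws IOException {
    Assert.assertTrue(fs().exists(file));
  }

  @HdfsCompatCaseCleanup
  public void cleanup() throws IOException {
    // Presumably runs afterwards; best-effort delete.
    fs().delete(file, true);
  }
}
]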
*/ -package org.apache.hadoop.compat; +package org.apache.hadoop.fs.compat; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseTearDown.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatCaseTearDown.java similarity index 96% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseTearDown.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatCaseTearDown.java index 0282958431a73..e7a74459d23be 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCaseTearDown.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatCaseTearDown.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat; +package org.apache.hadoop.fs.compat; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCommand.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatCommand.java similarity index 95% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCommand.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatCommand.java index 08b40c74ce54f..8ef35647dd45a 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatCommand.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatCommand.java @@ -15,12 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat; +package org.apache.hadoop.fs.compat; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.compat.suites.HdfsCompatSuiteForAll; -import org.apache.hadoop.compat.suites.HdfsCompatSuiteForShell; -import org.apache.hadoop.compat.suites.HdfsCompatSuiteForTpcds; +import org.apache.hadoop.fs.compat.suites.HdfsCompatSuiteForAll; +import org.apache.hadoop.fs.compat.suites.HdfsCompatSuiteForShell; +import org.apache.hadoop.fs.compat.suites.HdfsCompatSuiteForTpcds; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatEnvironment.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatEnvironment.java similarity index 99% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatEnvironment.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatEnvironment.java index c729a6d150f39..408581e42aa39 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatEnvironment.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatEnvironment.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.compat; +package org.apache.hadoop.fs.compat; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatIllegalArgumentException.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatIllegalArgumentException.java similarity index 96% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatIllegalArgumentException.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatIllegalArgumentException.java index ab49203b2ec7a..1ab8afbc920aa 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatIllegalArgumentException.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatIllegalArgumentException.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat; +package org.apache.hadoop.fs.compat; public class HdfsCompatIllegalArgumentException extends IllegalArgumentException { diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatIllegalCaseException.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatIllegalCaseException.java similarity index 96% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatIllegalCaseException.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatIllegalCaseException.java index 3ecef57f02524..21e85c4cb5254 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatIllegalCaseException.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatIllegalCaseException.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat; +package org.apache.hadoop.fs.compat; import org.apache.hadoop.HadoopIllegalArgumentException; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatReport.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatReport.java similarity index 98% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatReport.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatReport.java index f7103f88b6ba3..d5713ced10a08 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatReport.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatReport.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.compat; +package org.apache.hadoop.fs.compat; import java.util.Collection; import java.util.concurrent.ConcurrentLinkedQueue; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatShellScope.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatShellScope.java similarity index 99% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatShellScope.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatShellScope.java index 08e2325ed4b35..4753dd1f797b6 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatShellScope.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatShellScope.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat; +package org.apache.hadoop.fs.compat; import org.apache.commons.io.FileUtils; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatSuite.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatSuite.java similarity index 96% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatSuite.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatSuite.java index 3ee446fcd777b..a399bd323ec7f 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatSuite.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatSuite.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat; +package org.apache.hadoop.fs.compat; public interface HdfsCompatSuite { diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatUtil.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatUtil.java similarity index 99% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatUtil.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatUtil.java index 337efe2e7c02c..813b902327d02 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatUtil.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatUtil.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
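[Note on HdfsCompatReport, renamed above: its imports of Collection and ConcurrentLinkedQueue suggest lock-free collection of results from concurrently running cases, and the report printer earlier in the series emits PASSED/FAILED case lists. A sketch of that pattern under stated assumptions; the addPassed/addFailed/get* names below are illustrative, not the file's actual API.

import java.util.Collection;
import java.util.concurrent.ConcurrentLinkedQueue;

public class CompatReportSketch {
  // ConcurrentLinkedQueue lets cases on multiple threads record
  // results without additional locking.
  private final Collection<String> passed = new ConcurrentLinkedQueue<>();
  private final Collection<String> failed = new ConcurrentLinkedQueue<>();

  public void addPassed(String caseName) {
    passed.add(caseName);
  }

  public void addFailed(String caseName) {
    failed.add(caseName);
  }

  public Collection<String> getPassedCases() {
    return passed;
  }

  public Collection<String> getFailedCases() {
    return failed;
  }
}
]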
*/ -package org.apache.hadoop.compat; +package org.apache.hadoop.fs.compat; import org.apache.hadoop.fs.FSDataInputStream; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatibility.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatibility.java similarity index 99% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatibility.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatibility.java index d4e32cb6999f3..dc5464abf9663 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/HdfsCompatibility.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatibility.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat; +package org.apache.hadoop.fs.compat; import java.io.BufferedWriter; import java.io.File; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatAcl.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatAcl.java similarity index 97% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatAcl.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatAcl.java index 2463d5c4131e0..5138b890bed0f 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatAcl.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatAcl.java @@ -15,11 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat.cases.function; +package org.apache.hadoop.fs.compat.cases.function; -import org.apache.hadoop.compat.*; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.compat.*; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclEntryScope; import org.apache.hadoop.fs.permission.AclStatus; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatCreate.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatCreate.java similarity index 98% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatCreate.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatCreate.java index cd4642d5af0ac..c08ccc3af7daf 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatCreate.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatCreate.java @@ -15,10 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.compat.cases.function; +package org.apache.hadoop.fs.compat.cases.function; -import org.apache.hadoop.compat.*; import org.apache.hadoop.fs.*; +import org.apache.hadoop.fs.compat.*; import org.apache.hadoop.io.IOUtils; import org.junit.Assert; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatDirectory.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatDirectory.java similarity index 98% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatDirectory.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatDirectory.java index bb9a9d0b9291e..bf58a1fd1f846 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatDirectory.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatDirectory.java @@ -15,10 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat.cases.function; +package org.apache.hadoop.fs.compat.cases.function; -import org.apache.hadoop.compat.*; import org.apache.hadoop.fs.*; +import org.apache.hadoop.fs.compat.*; import org.junit.Assert; import java.io.IOException; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatFile.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatFile.java similarity index 98% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatFile.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatFile.java index f81c1043a5a30..ba2159148b3cb 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatFile.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatFile.java @@ -15,10 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat.cases.function; +package org.apache.hadoop.fs.compat.cases.function; -import org.apache.hadoop.compat.*; import org.apache.hadoop.fs.*; +import org.apache.hadoop.fs.compat.*; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.IOUtils; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatLocal.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatLocal.java similarity index 97% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatLocal.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatLocal.java index d3db5dbd59adf..52404f116a8d7 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatLocal.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatLocal.java @@ -15,13 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.compat.cases.function; +package org.apache.hadoop.fs.compat.cases.function; - -import org.apache.hadoop.compat.*; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.compat.*; import org.junit.Assert; import java.io.IOException; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatServer.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatServer.java similarity index 96% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatServer.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatServer.java index ccd60f23eaf73..9f1c62c1139bd 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatServer.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatServer.java @@ -15,13 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat.cases.function; +package org.apache.hadoop.fs.compat.cases.function; -import org.apache.hadoop.compat.AbstractHdfsCompatCase; -import org.apache.hadoop.compat.HdfsCompatCase; -import org.apache.hadoop.compat.HdfsCompatCaseGroup; -import org.apache.hadoop.compat.HdfsCompatUtil; +import org.apache.hadoop.fs.compat.AbstractHdfsCompatCase; +import org.apache.hadoop.fs.compat.HdfsCompatCase; +import org.apache.hadoop.fs.compat.HdfsCompatCaseGroup; +import org.apache.hadoop.fs.compat.HdfsCompatUtil; import org.apache.hadoop.fs.*; import org.junit.Assert; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatSnapshot.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatSnapshot.java similarity index 98% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatSnapshot.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatSnapshot.java index 73d28b7f2bdde..d710b46c8edb3 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatSnapshot.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatSnapshot.java @@ -15,11 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.compat.cases.function; +package org.apache.hadoop.fs.compat.cases.function; - -import org.apache.hadoop.compat.*; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.compat.*; import org.junit.Assert; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatStoragePolicy.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatStoragePolicy.java similarity index 97% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatStoragePolicy.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatStoragePolicy.java index fd5efbe934840..b87f64d78eedc 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatStoragePolicy.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatStoragePolicy.java @@ -15,12 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat.cases.function; +package org.apache.hadoop.fs.compat.cases.function; - -import org.apache.hadoop.compat.*; import org.apache.hadoop.fs.BlockStoragePolicySpi; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.compat.*; import org.junit.Assert; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatSymlink.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatSymlink.java similarity index 96% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatSymlink.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatSymlink.java index 8c26f367171da..5836155a5abe7 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatSymlink.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatSymlink.java @@ -15,12 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.compat.cases.function; +package org.apache.hadoop.fs.compat.cases.function; -import org.apache.hadoop.compat.*; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.compat.*; import org.junit.Assert; import java.io.IOException; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatTpcds.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatTpcds.java similarity index 97% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatTpcds.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatTpcds.java index 9c9f295f26852..1dfe3ca4d9e8b 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatTpcds.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatTpcds.java @@ -15,10 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat.cases.function; +package org.apache.hadoop.fs.compat.cases.function; -import org.apache.hadoop.compat.*; import org.apache.hadoop.fs.*; +import org.apache.hadoop.fs.compat.*; import org.junit.Assert; import java.io.IOException; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatXAttr.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatXAttr.java similarity index 97% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatXAttr.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatXAttr.java index 5e21a8b47a15c..6cd514ff7be0b 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/function/HdfsCompatXAttr.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatXAttr.java @@ -15,11 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat.cases.function; +package org.apache.hadoop.fs.compat.cases.function; - -import org.apache.hadoop.compat.*; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.compat.*; import org.junit.Assert; import java.io.IOException; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/implement/HdfsCompatFileSystemImpl.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/implement/HdfsCompatFileSystemImpl.java similarity index 98% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/implement/HdfsCompatFileSystemImpl.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/implement/HdfsCompatFileSystemImpl.java index 300b4756d60ac..db296c4938f8a 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/cases/implement/HdfsCompatFileSystemImpl.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/implement/HdfsCompatFileSystemImpl.java @@ -15,14 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.compat.cases.implement; +package org.apache.hadoop.fs.compat.cases.implement; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.compat.AbstractHdfsCompatCase; -import org.apache.hadoop.compat.HdfsCompatCase; -import org.apache.hadoop.compat.HdfsCompatCaseGroup; -import org.apache.hadoop.compat.HdfsCompatUtil; +import org.apache.hadoop.fs.compat.AbstractHdfsCompatCase; +import org.apache.hadoop.fs.compat.HdfsCompatCase; +import org.apache.hadoop.fs.compat.HdfsCompatCaseGroup; +import org.apache.hadoop.fs.compat.HdfsCompatUtil; import org.apache.hadoop.fs.CommonPathCapabilities; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForAll.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/HdfsCompatSuiteForAll.java similarity index 85% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForAll.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/HdfsCompatSuiteForAll.java index 8d384a0c79696..83edd4346455c 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForAll.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/HdfsCompatSuiteForAll.java @@ -15,13 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat.suites; +package org.apache.hadoop.fs.compat.suites; - -import org.apache.hadoop.compat.AbstractHdfsCompatCase; -import org.apache.hadoop.compat.HdfsCompatSuite; -import org.apache.hadoop.compat.cases.implement.*; -import org.apache.hadoop.compat.cases.function.*; +import org.apache.hadoop.fs.compat.AbstractHdfsCompatCase; +import org.apache.hadoop.fs.compat.HdfsCompatSuite; +import org.apache.hadoop.fs.compat.cases.function.*; +import org.apache.hadoop.fs.compat.cases.implement.HdfsCompatFileSystemImpl; public class HdfsCompatSuiteForAll implements HdfsCompatSuite { @Override diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForShell.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/HdfsCompatSuiteForShell.java similarity index 89% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForShell.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/HdfsCompatSuiteForShell.java index b440efb24f7ac..da5f2d6675b73 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForShell.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/HdfsCompatSuiteForShell.java @@ -15,11 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.compat.suites; +package org.apache.hadoop.fs.compat.suites; - -import org.apache.hadoop.compat.AbstractHdfsCompatCase; -import org.apache.hadoop.compat.HdfsCompatSuite; +import org.apache.hadoop.fs.compat.AbstractHdfsCompatCase; +import org.apache.hadoop.fs.compat.HdfsCompatSuite; public class HdfsCompatSuiteForShell implements HdfsCompatSuite { @Override diff --git a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForTpcds.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/HdfsCompatSuiteForTpcds.java similarity index 84% rename from hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForTpcds.java rename to hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/HdfsCompatSuiteForTpcds.java index 9d69427697ede..9872a76344f3c 100644 --- a/hadoop-compat-bench/src/main/java/org/apache/hadoop/compat/suites/HdfsCompatSuiteForTpcds.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/HdfsCompatSuiteForTpcds.java @@ -15,12 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat.suites; +package org.apache.hadoop.fs.compat.suites; - -import org.apache.hadoop.compat.AbstractHdfsCompatCase; -import org.apache.hadoop.compat.HdfsCompatSuite; -import org.apache.hadoop.compat.cases.function.*; +import org.apache.hadoop.fs.compat.AbstractHdfsCompatCase; +import org.apache.hadoop.fs.compat.HdfsCompatSuite; +import org.apache.hadoop.fs.compat.cases.function.HdfsCompatTpcds; public class HdfsCompatSuiteForTpcds implements HdfsCompatSuite { @Override diff --git a/hadoop-compat-bench/src/main/resources/hadoop-compat-bench-log4j.properties b/hadoop-tools/hadoop-compat-bench/src/main/resources/hadoop-compat-bench-log4j.properties similarity index 100% rename from hadoop-compat-bench/src/main/resources/hadoop-compat-bench-log4j.properties rename to hadoop-tools/hadoop-compat-bench/src/main/resources/hadoop-compat-bench-log4j.properties diff --git a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatDefaultSuites.java b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/TestHdfsCompatDefaultSuites.java similarity index 92% rename from hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatDefaultSuites.java rename to hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/TestHdfsCompatDefaultSuites.java index 2b7adc6ac5287..bf22cc96e611c 100644 --- a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatDefaultSuites.java +++ b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/TestHdfsCompatDefaultSuites.java @@ -15,10 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.compat; +package org.apache.hadoop.fs.compat; -import org.apache.hadoop.compat.hdfs.HdfsCompatMiniCluster; -import org.apache.hadoop.compat.hdfs.HdfsCompatTestCommand; +import org.apache.hadoop.fs.compat.hdfs.HdfsCompatMiniCluster; +import org.apache.hadoop.fs.compat.hdfs.HdfsCompatTestCommand; import org.apache.hadoop.conf.Configuration; import org.junit.Assert; import org.junit.Test; diff --git a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatFsCommand.java b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/TestHdfsCompatFsCommand.java similarity index 95% rename from hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatFsCommand.java rename to hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/TestHdfsCompatFsCommand.java index ed17fbced273d..0107b7c9ec788 100644 --- a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatFsCommand.java +++ b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/TestHdfsCompatFsCommand.java @@ -15,13 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat; +package org.apache.hadoop.fs.compat; -import org.apache.hadoop.compat.hdfs.HdfsCompatMiniCluster; +import org.apache.hadoop.fs.compat.hdfs.HdfsCompatMiniCluster; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.compat.cases.HdfsCompatAclTestCases; -import org.apache.hadoop.compat.cases.HdfsCompatMkdirTestCases; +import org.apache.hadoop.fs.compat.cases.HdfsCompatAclTestCases; +import org.apache.hadoop.fs.compat.cases.HdfsCompatMkdirTestCases; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.junit.Assert; diff --git a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatInterfaceCoverage.java b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/TestHdfsCompatInterfaceCoverage.java similarity index 94% rename from hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatInterfaceCoverage.java rename to hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/TestHdfsCompatInterfaceCoverage.java index 77643560aaba3..f7a78fe20c6d8 100644 --- a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatInterfaceCoverage.java +++ b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/TestHdfsCompatInterfaceCoverage.java @@ -15,10 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.compat; +package org.apache.hadoop.fs.compat; -import org.apache.hadoop.compat.cases.implement.HdfsCompatFileSystemImpl; +import org.apache.hadoop.fs.compat.cases.implement.HdfsCompatFileSystemImpl; import org.apache.hadoop.fs.FileSystem; import org.junit.Assert; import org.junit.Ignore; diff --git a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatShellCommand.java b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/TestHdfsCompatShellCommand.java similarity index 94% rename from hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatShellCommand.java rename to hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/TestHdfsCompatShellCommand.java index 8d4c1678647c5..c3545625bb5a5 100644 --- a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/TestHdfsCompatShellCommand.java +++ b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/TestHdfsCompatShellCommand.java @@ -15,13 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat; +package org.apache.hadoop.fs.compat; import org.apache.commons.io.FileUtils; -import org.apache.hadoop.compat.hdfs.HdfsCompatMiniCluster; -import org.apache.hadoop.compat.hdfs.HdfsCompatTestCommand; -import org.apache.hadoop.compat.hdfs.HdfsCompatTestShellScope; +import org.apache.hadoop.fs.compat.hdfs.HdfsCompatMiniCluster; +import org.apache.hadoop.fs.compat.hdfs.HdfsCompatTestCommand; +import org.apache.hadoop.fs.compat.hdfs.HdfsCompatTestShellScope; import org.apache.hadoop.conf.Configuration; import org.junit.After; import org.junit.Assert; diff --git a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/cases/HdfsCompatAclTestCases.java b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/cases/HdfsCompatAclTestCases.java similarity index 90% rename from hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/cases/HdfsCompatAclTestCases.java rename to hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/cases/HdfsCompatAclTestCases.java index 15baf04eac0ae..823f6ba2dd51f 100644 --- a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/cases/HdfsCompatAclTestCases.java +++ b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/cases/HdfsCompatAclTestCases.java @@ -15,11 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.compat.cases; +package org.apache.hadoop.fs.compat.cases; -import org.apache.hadoop.compat.AbstractHdfsCompatCase; -import org.apache.hadoop.compat.HdfsCompatCase; -import org.apache.hadoop.compat.HdfsCompatUtil; +import org.apache.hadoop.fs.compat.AbstractHdfsCompatCase; +import org.apache.hadoop.fs.compat.HdfsCompatCase; +import org.apache.hadoop.fs.compat.HdfsCompatUtil; import java.util.ArrayList; diff --git a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/cases/HdfsCompatMkdirTestCases.java b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/cases/HdfsCompatMkdirTestCases.java similarity index 83% rename from hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/cases/HdfsCompatMkdirTestCases.java rename to hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/cases/HdfsCompatMkdirTestCases.java index 26209dcacc8be..0fa58e7dadb6e 100644 --- a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/cases/HdfsCompatMkdirTestCases.java +++ b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/cases/HdfsCompatMkdirTestCases.java @@ -15,11 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat.cases; +package org.apache.hadoop.fs.compat.cases; -import org.apache.hadoop.compat.AbstractHdfsCompatCase; -import org.apache.hadoop.compat.HdfsCompatCase; -import org.apache.hadoop.compat.HdfsCompatUtil; +import org.apache.hadoop.fs.compat.AbstractHdfsCompatCase; +import org.apache.hadoop.fs.compat.HdfsCompatCase; +import org.apache.hadoop.fs.compat.HdfsCompatUtil; public class HdfsCompatMkdirTestCases extends AbstractHdfsCompatCase { @HdfsCompatCase diff --git a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatMiniCluster.java b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/hdfs/HdfsCompatMiniCluster.java similarity index 98% rename from hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatMiniCluster.java rename to hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/hdfs/HdfsCompatMiniCluster.java index ee6fe440edc9e..6de006418fd76 100644 --- a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatMiniCluster.java +++ b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/hdfs/HdfsCompatMiniCluster.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.compat.hdfs; +package org.apache.hadoop.fs.compat.hdfs; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestCommand.java b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/hdfs/HdfsCompatTestCommand.java similarity index 88% rename from hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestCommand.java rename to hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/hdfs/HdfsCompatTestCommand.java index e998ee8f18d25..cfbca32764fe5 100644 --- a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestCommand.java +++ b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/hdfs/HdfsCompatTestCommand.java @@ -15,11 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.compat.hdfs; +package org.apache.hadoop.fs.compat.hdfs; -import org.apache.hadoop.compat.*; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.compat.HdfsCompatCommand; +import org.apache.hadoop.fs.compat.HdfsCompatEnvironment; +import org.apache.hadoop.fs.compat.HdfsCompatShellScope; +import org.apache.hadoop.fs.compat.HdfsCompatSuite; import java.io.IOException; import java.lang.reflect.Field; diff --git a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestShellScope.java b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/hdfs/HdfsCompatTestShellScope.java similarity index 95% rename from hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestShellScope.java rename to hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/hdfs/HdfsCompatTestShellScope.java index 62e95eace0fb8..b63c07d1ac908 100644 --- a/hadoop-compat-bench/src/test/java/org/apache/hadoop/compat/hdfs/HdfsCompatTestShellScope.java +++ b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/hdfs/HdfsCompatTestShellScope.java @@ -15,12 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.compat.hdfs; +package org.apache.hadoop.fs.compat.hdfs; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.compat.HdfsCompatEnvironment; -import org.apache.hadoop.compat.HdfsCompatShellScope; -import org.apache.hadoop.compat.HdfsCompatSuite; +import org.apache.hadoop.fs.compat.HdfsCompatEnvironment; +import org.apache.hadoop.fs.compat.HdfsCompatShellScope; +import org.apache.hadoop.fs.compat.HdfsCompatSuite; import java.io.File; import java.io.FileOutputStream; diff --git a/hadoop-compat-bench/src/test/resources/hadoop-compat-bench-test-shell-hadoop.sh b/hadoop-tools/hadoop-compat-bench/src/test/resources/hadoop-compat-bench-test-shell-hadoop.sh similarity index 100% rename from hadoop-compat-bench/src/test/resources/hadoop-compat-bench-test-shell-hadoop.sh rename to hadoop-tools/hadoop-compat-bench/src/test/resources/hadoop-compat-bench-test-shell-hadoop.sh diff --git a/hadoop-compat-bench/src/test/resources/hadoop-compat-bench-test-shell-hdfs.sh b/hadoop-tools/hadoop-compat-bench/src/test/resources/hadoop-compat-bench-test-shell-hdfs.sh similarity index 100% rename from hadoop-compat-bench/src/test/resources/hadoop-compat-bench-test-shell-hdfs.sh rename to hadoop-tools/hadoop-compat-bench/src/test/resources/hadoop-compat-bench-test-shell-hdfs.sh diff --git a/hadoop-compat-bench/src/test/resources/test-case-simple.t b/hadoop-tools/hadoop-compat-bench/src/test/resources/test-case-simple.t similarity index 100% rename from hadoop-compat-bench/src/test/resources/test-case-simple.t rename to hadoop-tools/hadoop-compat-bench/src/test/resources/test-case-simple.t diff --git a/hadoop-compat-bench/src/test/resources/test-case-skip.t b/hadoop-tools/hadoop-compat-bench/src/test/resources/test-case-skip.t similarity index 100% rename from hadoop-compat-bench/src/test/resources/test-case-skip.t rename to hadoop-tools/hadoop-compat-bench/src/test/resources/test-case-skip.t diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml index 5816165f8ed11..8c1256a177cc4 100644 --- a/hadoop-tools/pom.xml +++ b/hadoop-tools/pom.xml @@ -52,6 +52,7 @@ hadoop-aliyun hadoop-fs2img hadoop-benchmark + hadoop-compat-bench From c8ccf81997c71f30ae6e8c1b67f3fe2ea6285de3 Mon Sep 17 00:00:00 2001 From: "chonghu.lh" Date: Thu, 29 Feb 2024 12:39:18 +0800 Subject: [PATCH 4/4] Refine code for style checks --- .../fs/compat/AbstractHdfsCompatCase.java | 9 +- .../hadoop/fs/compat/HdfsCompatApiScope.java | 49 +++++----- .../fs/compat/HdfsCompatEnvironment.java | 8 +- .../fs/compat/HdfsCompatShellScope.java | 8 +- .../hadoop/fs/compat/HdfsCompatUtil.java | 11 ++- .../hadoop/fs/compat/HdfsCompatibility.java | 91 ++++++++++--------- .../compat/cases/function/HdfsCompatAcl.java | 8 +- .../cases/function/HdfsCompatDirectory.java | 8 +- .../compat/cases/function/HdfsCompatFile.java | 26 +++--- .../cases/function/HdfsCompatLocal.java | 10 +- .../cases/function/HdfsCompatServer.java | 1 - .../function/HdfsCompatStoragePolicy.java | 12 +-- .../cases/function/HdfsCompatSymlink.java | 4 +- .../cases/function/HdfsCompatTpcds.java | 28 +++--- .../compat/cases/function/package-info.java | 23 +++++ .../implement/HdfsCompatFileSystemImpl.java | 1 - .../compat/cases/implement/package-info.java | 23 +++++ .../apache/hadoop/fs/compat/package-info.java | 24 +++++ .../hadoop/fs/compat/suites/package-info.java | 23 +++++ .../fs/compat/TestHdfsCompatFsCommand.java | 2 +- .../fs/compat/TestHdfsCompatShellCommand.java | 8 +- 21 files changed, 239 insertions(+), 138 deletions(-) create mode
100644 hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/package-info.java create mode 100644 hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/implement/package-info.java create mode 100644 hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/package-info.java create mode 100644 hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/package-info.java diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/AbstractHdfsCompatCase.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/AbstractHdfsCompatCase.java index 0fa5e49f05efc..58917e19618ec 100644 --- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/AbstractHdfsCompatCase.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/AbstractHdfsCompatCase.java @@ -25,17 +25,18 @@ import java.util.Random; public abstract class AbstractHdfsCompatCase { + private static final Random RANDOM = new Random(); + private FileSystem fs; private HdfsCompatEnvironment env; private Path localPath; - private static final Random random = new Random(); public AbstractHdfsCompatCase() { } - public void init(HdfsCompatEnvironment env) { + public void init(HdfsCompatEnvironment environment) { + this.env = environment; this.fs = env.getFileSystem(); - this.env = env; LocalFileSystem localFs = env.getLocalFileSystem(); this.localPath = localFs.makeQualified(new Path(env.getLocalTmpDir())); } @@ -58,7 +59,7 @@ public Path getUniquePath() { public static Path getUniquePath(Path basePath) { return new Path(basePath, System.currentTimeMillis() - + "_" + random.nextLong()); + + "_" + RANDOM.nextLong()); } public Path makePath(String name) { diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatApiScope.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatApiScope.java index eb572bb629674..219b7ee198935 100644 --- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatApiScope.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatApiScope.java @@ -38,7 +38,7 @@ public class HdfsCompatApiScope { - static final boolean skipNoSuchMethodError = true; + static final boolean SKIP_NO_SUCH_METHOD_ERROR = true; private static final Logger LOG = LoggerFactory.getLogger(HdfsCompatApiScope.class); @@ -60,27 +60,30 @@ public HdfsCompatReport apply() { final AbstractHdfsCompatCase obj = group.obj; GroupedResult groupedResult = new GroupedResult(obj, group.methods); - { // SetUp - groupedResult.setUp = test(group.setUp, obj); - } + // SetUp + groupedResult.setUp = test(group.setUp, obj); + if (groupedResult.setUp == Result.OK) { for (Method method : group.methods) { CaseResult caseResult = new CaseResult(); - { // Prepare - caseResult.prepareResult = test(group.prepare, obj); - } + + // Prepare + caseResult.prepareResult = test(group.prepare, obj); + if (caseResult.prepareResult == Result.OK) { // Case caseResult.methodResult = test(method, obj); } - { // Cleanup - caseResult.cleanupResult = test(group.cleanup, obj); - } + + // Cleanup + caseResult.cleanupResult = test(group.cleanup, obj); + groupedResult.results.put(getCaseName(method), caseResult); } } - { // TearDown - groupedResult.tearDown = test(group.tearDown, obj); - } + + // TearDown + groupedResult.tearDown = test(group.tearDown, obj); 
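Note: the restructured apply() above makes the per-group lifecycle explicit: setUp runs once per case class, each @HdfsCompatCase method is wrapped by prepare and cleanup, and tearDown closes the group before results are exported. As a minimal sketch (illustrative only, not part of this patch; the class name and file length are invented), a case class that this driver would execute:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.compat.*;
    import org.junit.Assert;
    import java.io.IOException;

    @HdfsCompatCaseGroup(name = "Example")
    public class HdfsCompatExample extends AbstractHdfsCompatCase {
      private Path file;

      @HdfsCompatCasePrepare
      public void prepare() throws IOException {
        // Runs before each @HdfsCompatCase method of this class.
        this.file = makePath("file");
        HdfsCompatUtil.createFile(fs(), this.file, 128);
      }

      @HdfsCompatCase
      public void exists() throws IOException {
        // One scored case; per the hunk above, a NoSuchMethodError here
        // is reported as SKIP when SKIP_NO_SUCH_METHOD_ERROR is set.
        Assert.assertTrue(fs().exists(this.file));
      }

      @HdfsCompatCaseCleanup
      public void cleanup() throws IOException {
        // Runs after each @HdfsCompatCase method, even if the case failed.
        fs().delete(this.file, true);
      }
    }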
+ groupedResult.exportTo(report); } return report; @@ -95,7 +98,7 @@ private Result test(Method method, AbstractHdfsCompatCase obj) { return Result.OK; } catch (InvocationTargetException t) { Throwable e = t.getCause(); - if (skipNoSuchMethodError && (e instanceof NoSuchMethodError)) { + if (SKIP_NO_SUCH_METHOD_ERROR && (e instanceof NoSuchMethodError)) { LOG.warn("Case skipped with method " + method.getName() + " of class " + obj.getClass(), e); return Result.SKIP; @@ -150,8 +153,9 @@ public static Set getPublicInterfaces(Class cls) { return publicMethodNames; } - private static class GroupedCase { - private static final Map> definedMethods = new HashMap<>(); + private static final class GroupedCase { + private static final Map> DEFINED_METHODS = + new HashMap<>(); private final AbstractHdfsCompatCase obj; private final List methods; private final Method setUp; @@ -240,11 +244,12 @@ private static GroupedCase parse(Class cls, private static synchronized boolean checkDefined(String ifDef) { String[] classAndMethod = ifDef.split("#", 2); if (classAndMethod.length < 2) { - throw new HdfsCompatIllegalCaseException("ifDef must be with format className#methodName"); + throw new HdfsCompatIllegalCaseException( + "ifDef must be with format className#methodName"); } final String className = classAndMethod[0]; final String methodName = classAndMethod[1]; - Set methods = definedMethods.getOrDefault(className, null); + Set methods = DEFINED_METHODS.getOrDefault(className, null); if (methods != null) { return methods.contains(methodName); } @@ -255,13 +260,13 @@ private static synchronized boolean checkDefined(String ifDef) { throw new HdfsCompatIllegalCaseException(e.getMessage()); } methods = getPublicInterfaces(cls); - definedMethods.put(className, methods); + DEFINED_METHODS.put(className, methods); return methods.contains(methodName); } } - private static class GroupedResult { - private static final int commonPrefixLength = AbstractHdfsCompatCase.class + private static final class GroupedResult { + private static final int COMMON_PREFIX_LEN = AbstractHdfsCompatCase.class .getPackage().getName().length() + ".cases.".length(); private final String prefix; private Result setUp; @@ -323,7 +328,7 @@ private void exportTo(HdfsCompatReport report) { } private static String getNamePrefix(Class cls) { - return (cls.getPackage().getName() + ".").substring(commonPrefixLength) + + return (cls.getPackage().getName() + ".").substring(COMMON_PREFIX_LEN) + getGroupName(cls) + "."; } diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatEnvironment.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatEnvironment.java index 408581e42aa39..6739592385042 100644 --- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatEnvironment.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatEnvironment.java @@ -41,8 +41,8 @@ public class HdfsCompatEnvironment { private static final Logger LOG = LoggerFactory.getLogger(HdfsCompatEnvironment.class); - private static final String dateFormat = "yyyy_MM_dd_HH_mm_ss"; - private static final Random random = new Random(); + private static final String DATE_FORMAT = "yyyy_MM_dd_HH_mm_ss"; + private static final Random RANDOM = new Random(); private final Path uri; private final Configuration conf; private FileSystem fs; @@ -61,7 +61,7 @@ public void init() throws IOException { Date now = new Date(); String uuid = 
UUID.randomUUID().toString(); String uniqueDir = "hadoop-compatibility-benchmark/" + - new SimpleDateFormat(dateFormat).format(now) + "/" + uuid; + new SimpleDateFormat(DATE_FORMAT).format(now) + "/" + uuid; this.fs = uri.getFileSystem(conf); this.localFs = FileSystem.getLocal(conf); @@ -135,7 +135,7 @@ private String getEnvTmpDir() { return null; } final String tmpDir = validDirs.get( - random.nextInt(validDirs.size())); + RANDOM.nextInt(validDirs.size())); return new File(tmpDir).getAbsolutePath(); } diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatShellScope.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatShellScope.java index 4753dd1f797b6..fef6d8bea9191 100644 --- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatShellScope.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatShellScope.java @@ -50,7 +50,7 @@ public class HdfsCompatShellScope { private static final Logger LOG = LoggerFactory.getLogger(HdfsCompatShellScope.class); - private static final Random random = new Random(); + private static final Random RANDOM = new Random(); private final HdfsCompatEnvironment env; private final HdfsCompatSuite suite; private File stdoutDir = null; @@ -243,7 +243,7 @@ private String getStoragePolicy() { return defPolicyName; } else { return differentPolicies.get( - random.nextInt(differentPolicies.size())); + RANDOM.nextInt(differentPolicies.size())); } } @@ -382,7 +382,7 @@ private List readLines(File file) throws IOException { return lines; } - private static class StreamPrinter extends Thread { + private static final class StreamPrinter extends Thread { private final InputStream in; private final List lines; @@ -406,7 +406,7 @@ public void run() { } } - private static class ExecResult { + private static final class ExecResult { private final int code; private final List out; private final List err; diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatUtil.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatUtil.java index 813b902327d02..0634faae10776 100644 --- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatUtil.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatUtil.java @@ -30,10 +30,13 @@ import java.io.IOException; import java.util.Random; -public class HdfsCompatUtil { +public final class HdfsCompatUtil { private static final Logger LOG = LoggerFactory.getLogger(HdfsCompatUtil.class); - private static final Random random = new Random(); + private static final Random RANDOM = new Random(); + + private HdfsCompatUtil() { + } public static void checkImplementation(ImplementationFunction func) { try { @@ -41,7 +44,7 @@ public static void checkImplementation(ImplementationFunction func) { } catch (UnsupportedOperationException e) { throw e; } catch (NoSuchMethodError e) { - if (HdfsCompatApiScope.skipNoSuchMethodError) { + if (HdfsCompatApiScope.SKIP_NO_SUCH_METHOD_ERROR) { throw e; } else { throw new UnsupportedOperationException(e); @@ -70,7 +73,7 @@ public static void createFile(FileSystem fs, Path file, boolean overwrite, byte[] toWrite = new byte[bufferSize]; long bytesToWrite = fileLen; while (bytesToWrite > 0) { - random.nextBytes(toWrite); + RANDOM.nextBytes(toWrite); int bytesToWriteNext = (bufferSize < bytesToWrite) ? 
bufferSize : (int) bytesToWrite; out.write(toWrite, 0, bytesToWriteNext); diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatibility.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatibility.java index dc5464abf9663..d26607a225abd 100644 --- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatibility.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/HdfsCompatibility.java @@ -113,29 +113,29 @@ private int doRun() throws Exception { HdfsCompatCommand cmd = new HdfsCompatCommand(uri, suite, getConf()); cmd.initialize(); HdfsCompatReport report = cmd.apply(); - OutputStream out = null; + OutputStream outputFile = null; try { if (this.output != null) { - out = new FileOutputStream(new File(this.output)); + outputFile = new FileOutputStream(new File(this.output)); } } catch (Exception e) { LOG.error("Create output file failed", e); - out = null; + outputFile = null; } try { - printReport(report, out); + printReport(report, outputFile); } finally { - IOUtils.closeStream(out); + IOUtils.closeStream(outputFile); } return 0; } private boolean isHelp(String[] args) { - return (args == null) || (args.length == 0) || ( - (args.length == 1) && ( - args[0].equalsIgnoreCase("-h") || - args[0].equalsIgnoreCase("--help")) - ); + if ((args == null) || (args.length == 0)) { + return true; + } + return (args.length == 1) && (args[0].equalsIgnoreCase("-h") || + args[0].equalsIgnoreCase("--help")); } private void parseArgs(String[] args) { @@ -167,48 +167,49 @@ void printOut(String message) { out.println(message); } - void printReport(HdfsCompatReport report, OutputStream out) + void printReport(HdfsCompatReport report, OutputStream detailStream) throws IOException { StringBuilder buffer = new StringBuilder(); - { // Line 1: - buffer.append("Hadoop Compatibility Report for "); - buffer.append(report.getSuite().getSuiteName()); - buffer.append(":\n"); - } - { // Line 2: - long passed = report.getPassedCase().size(); - long failed = report.getFailedCase().size(); - String percent = (failed == 0) ? "100" : String.format("%.2f", - ((double) passed) / ((double) (passed + failed)) * 100); - buffer.append("\t"); - buffer.append(percent); - buffer.append("%, PASSED "); - buffer.append(passed); - buffer.append(" OVER "); - buffer.append(passed + failed); - buffer.append("\n"); - } - { // Line 3: - buffer.append("\tURI: "); - buffer.append(report.getUri()); - if (report.getSuite() != null) { - buffer.append(" (suite: "); - buffer.append(report.getSuite().getClass().getName()); - buffer.append(")"); - } - buffer.append("\n"); - } - { // Line 4: - buffer.append("\tHadoop Version as Baseline: "); - buffer.append(VersionInfo.getVersion()); + + // Line 1: + buffer.append("Hadoop Compatibility Report for "); + buffer.append(report.getSuite().getSuiteName()); + buffer.append(":\n"); + + // Line 2: + long passed = report.getPassedCase().size(); + long failed = report.getFailedCase().size(); + String percent = (failed == 0) ? 
"100" : String.format("%.2f", + ((double) passed) / ((double) (passed + failed)) * 100); + buffer.append("\t"); + buffer.append(percent); + buffer.append("%, PASSED "); + buffer.append(passed); + buffer.append(" OVER "); + buffer.append(passed + failed); + buffer.append("\n"); + + // Line 3: + buffer.append("\tURI: "); + buffer.append(report.getUri()); + if (report.getSuite() != null) { + buffer.append(" (suite: "); + buffer.append(report.getSuite().getClass().getName()); + buffer.append(")"); } + buffer.append("\n"); + + // Line 4: + buffer.append("\tHadoop Version as Baseline: "); + buffer.append(VersionInfo.getVersion()); + final String shortMessage = buffer.toString(); printOut(shortMessage); - if (out != null) { - out.write(shortMessage.getBytes(StandardCharsets.UTF_8)); + if (detailStream != null) { + detailStream.write(shortMessage.getBytes(StandardCharsets.UTF_8)); BufferedWriter writer = new BufferedWriter( - new OutputStreamWriter(out, StandardCharsets.UTF_8)); + new OutputStreamWriter(detailStream, StandardCharsets.UTF_8)); writer.newLine(); writer.write("PASSED CASES:"); writer.newLine(); diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatAcl.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatAcl.java index 5138b890bed0f..aea4f1d2e5916 100644 --- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatAcl.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatAcl.java @@ -31,9 +31,9 @@ @HdfsCompatCaseGroup(name = "ACL") public class HdfsCompatAcl extends AbstractHdfsCompatCase { - private static final String initFileAcl = + private static final String INIT_FILE_ACL = "user::rwx,group::rwx,other::rwx,user:foo:rwx"; - private static final String initDirAcl = + private static final String INIT_DIR_ACL = "default:user::rwx,default:group::rwx,default:other::rwx"; private Path dir; private Path file; @@ -43,9 +43,9 @@ public void prepare() throws IOException { this.dir = makePath("dir"); this.file = new Path(this.dir, "file"); HdfsCompatUtil.createFile(fs(), this.file, 0); - List entries = AclEntry.parseAclSpec(initDirAcl, true); + List entries = AclEntry.parseAclSpec(INIT_DIR_ACL, true); fs().setAcl(dir, entries); - entries = AclEntry.parseAclSpec(initFileAcl, true); + entries = AclEntry.parseAclSpec(INIT_FILE_ACL, true); fs().setAcl(file, entries); } diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatDirectory.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatDirectory.java index bf58a1fd1f846..df7488e32c021 100644 --- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatDirectory.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatDirectory.java @@ -27,7 +27,7 @@ @HdfsCompatCaseGroup(name = "Directory") public class HdfsCompatDirectory extends AbstractHdfsCompatCase { - private static final int fileLen = 128; + private static final int FILE_LEN = 128; private Path dir = null; private Path file = null; @@ -35,7 +35,7 @@ public class HdfsCompatDirectory extends AbstractHdfsCompatCase { public void prepare() throws IOException { this.dir = makePath("dir"); this.file = new Path(this.dir, "file"); - HdfsCompatUtil.createFile(fs(), file, fileLen); 
+ HdfsCompatUtil.createFile(fs(), file, FILE_LEN); } @HdfsCompatCaseCleanup @@ -114,13 +114,13 @@ public void getContentSummary() throws IOException { ContentSummary summary = fs().getContentSummary(dir); Assert.assertEquals(1, summary.getFileCount()); Assert.assertEquals(1, summary.getDirectoryCount()); - Assert.assertEquals(fileLen, summary.getLength()); + Assert.assertEquals(FILE_LEN, summary.getLength()); } @HdfsCompatCase public void getUsed() throws IOException { long used = fs().getUsed(dir); - Assert.assertTrue(used >= fileLen); + Assert.assertTrue(used >= FILE_LEN); } @HdfsCompatCase diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatFile.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatFile.java index ba2159148b3cb..251c5371a3b7f 100644 --- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatFile.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatFile.java @@ -32,17 +32,17 @@ @HdfsCompatCaseGroup(name = "File") public class HdfsCompatFile extends AbstractHdfsCompatCase { - private static final int fileLen = 128; - private static final long blockSize = 1048576; - private static final short replication = 1; - private static final Random random = new Random(); + private static final int FILE_LEN = 128; + private static final long BLOCK_SIZE = 1048576; + private static final short REPLICATION = 1; + private static final Random RANDOM = new Random(); private Path file = null; @HdfsCompatCasePrepare public void prepare() throws IOException { this.file = makePath("file"); HdfsCompatUtil.createFile(fs(), this.file, true, - 1024, fileLen, blockSize, replication); + 1024, FILE_LEN, BLOCK_SIZE, REPLICATION); } @HdfsCompatCaseCleanup @@ -69,12 +69,12 @@ public void isFile() throws IOException { @HdfsCompatCase public void getLength() throws IOException { - Assert.assertEquals(fileLen, fs().getLength(file)); + Assert.assertEquals(FILE_LEN, fs().getLength(file)); } @HdfsCompatCase(brief = "arbitrary blockSize") public void getBlockSize() throws IOException { - Assert.assertEquals(blockSize, fs().getBlockSize(file)); + Assert.assertEquals(BLOCK_SIZE, fs().getBlockSize(file)); } @HdfsCompatCase @@ -110,7 +110,7 @@ public void cancelDeleteOnExit() throws IOException { @HdfsCompatCase public void truncate() throws IOException, InterruptedException { - int newLen = random.nextInt(fileLen); + int newLen = RANDOM.nextInt(FILE_LEN); boolean finished = fs().truncate(file, newLen); while (!finished) { Thread.sleep(1000); @@ -122,8 +122,8 @@ public void truncate() throws IOException, InterruptedException { @HdfsCompatCase public void setOwner() throws Exception { - final String owner = "test_" + random.nextInt(1024); - final String group = "test_" + random.nextInt(1024); + final String owner = "test_" + RANDOM.nextInt(1024); + final String group = "test_" + RANDOM.nextInt(1024); final String privileged = getPrivilegedUser(); UserGroupInformation.createRemoteUser(privileged).doAs( (PrivilegedExceptionAction) () -> { @@ -174,7 +174,7 @@ public void getFileChecksum() throws IOException { @HdfsCompatCase public void getFileBlockLocations() throws IOException { - BlockLocation[] locations = fs().getFileBlockLocations(file, 0, fileLen); + BlockLocation[] locations = fs().getFileBlockLocations(file, 0, FILE_LEN); Assert.assertTrue(locations.length >= 1); BlockLocation location = 
locations[0]; Assert.assertTrue(location.getLength() > 0); @@ -182,7 +182,7 @@ public void getFileBlockLocations() throws IOException { @HdfsCompatCase public void getReplication() throws IOException { - Assert.assertEquals(replication, fs().getReplication(file)); + Assert.assertEquals(REPLICATION, fs().getReplication(file)); } @HdfsCompatCase(brief = "arbitrary replication") @@ -195,7 +195,7 @@ public void setReplication() throws IOException { public void getPathHandle() throws IOException { FileStatus status = fs().getFileStatus(file); PathHandle handle = fs().getPathHandle(status, Options.HandleOpt.path()); - final int maxReadLen = Math.min(fileLen, 4096); + final int maxReadLen = Math.min(FILE_LEN, 4096); byte[] data = new byte[maxReadLen]; try (FSDataInputStream in = fs().open(handle, 1024)) { in.readFully(data); diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatLocal.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatLocal.java index 52404f116a8d7..4464d7aa4bf14 100644 --- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatLocal.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatLocal.java @@ -28,8 +28,8 @@ @HdfsCompatCaseGroup(name = "Local") public class HdfsCompatLocal extends AbstractHdfsCompatCase { - private static final int fileLen = 128; - private static final Random random = new Random(); + private static final int FILE_LEN = 128; + private static final Random RANDOM = new Random(); private LocalFileSystem localFs; private Path localBasePath; private Path localSrc; @@ -51,13 +51,13 @@ public void tearDown() { @HdfsCompatCasePrepare public void prepare() throws IOException { final String unique = System.currentTimeMillis() - + "_" + random.nextLong() + "/"; + + "_" + RANDOM.nextLong() + "/"; this.localSrc = new Path(localBasePath, unique + "src"); this.localDst = new Path(localBasePath, unique + "dst"); this.src = new Path(getBasePath(), unique + "src"); this.dst = new Path(getBasePath(), unique + "dst"); - HdfsCompatUtil.createFile(localFs, this.localSrc, fileLen); - HdfsCompatUtil.createFile(fs(), this.src, fileLen); + HdfsCompatUtil.createFile(localFs, this.localSrc, FILE_LEN); + HdfsCompatUtil.createFile(fs(), this.src, FILE_LEN); } @HdfsCompatCaseCleanup diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatServer.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatServer.java index 9f1c62c1139bd..d6d134393b635 100644 --- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatServer.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatServer.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.fs.compat.cases.function; - import org.apache.hadoop.fs.compat.AbstractHdfsCompatCase; import org.apache.hadoop.fs.compat.HdfsCompatCase; import org.apache.hadoop.fs.compat.HdfsCompatCaseGroup; diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatStoragePolicy.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatStoragePolicy.java index b87f64d78eedc..99a0daa49a1e8 100644 --- 
a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatStoragePolicy.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatStoragePolicy.java @@ -33,7 +33,7 @@ public class HdfsCompatStoragePolicy extends AbstractHdfsCompatCase { private static final Logger LOG = LoggerFactory.getLogger(HdfsCompatStoragePolicy.class); - private static final Random random = new Random(); + private static final Random RANDOM = new Random(); private Path dir; private Path file; private String[] policies; @@ -66,7 +66,7 @@ public void prepare() throws IOException { this.policyName = defaultPolicyName; } else { this.policyName = differentPolicies.get( - random.nextInt(differentPolicies.size())); + RANDOM.nextInt(differentPolicies.size())); } } @@ -87,8 +87,8 @@ public void unsetStoragePolicy() throws IOException { fs().setStoragePolicy(dir, policyName); fs().unsetStoragePolicy(dir); BlockStoragePolicySpi policy = fs().getStoragePolicy(dir); - String policyName = (policy == null) ? null : policy.getName(); - Assert.assertEquals(defaultPolicyName, policyName); + String policyNameAfterUnset = (policy == null) ? null : policy.getName(); + Assert.assertEquals(defaultPolicyName, policyNameAfterUnset); } @HdfsCompatCase(ifDef = "org.apache.hadoop.fs.FileSystem#satisfyStoragePolicy") @@ -100,7 +100,7 @@ public void satisfyStoragePolicy() throws IOException { @HdfsCompatCase public void getStoragePolicy() throws IOException { BlockStoragePolicySpi policy = fs().getStoragePolicy(file); - String policyName = (policy == null) ? null : policy.getName(); - Assert.assertEquals(defaultPolicyName, policyName); + String initialPolicyName = (policy == null) ? null : policy.getName(); + Assert.assertEquals(defaultPolicyName, initialPolicyName); } } \ No newline at end of file diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatSymlink.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatSymlink.java index 5836155a5abe7..198214756797b 100644 --- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatSymlink.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatSymlink.java @@ -27,7 +27,7 @@ @HdfsCompatCaseGroup(name = "Symlink") public class HdfsCompatSymlink extends AbstractHdfsCompatCase { - private static final int fileLen = 128; + private static final int FILE_LEN = 128; private Path target = null; private Path link = null; @@ -40,7 +40,7 @@ public void setUp() { public void prepare() throws IOException { this.target = makePath("target"); this.link = new Path(this.target.getParent(), "link"); - HdfsCompatUtil.createFile(fs(), this.target, fileLen); + HdfsCompatUtil.createFile(fs(), this.target, FILE_LEN); fs().createSymlink(this.target, this.link, true); } diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatTpcds.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatTpcds.java index 1dfe3ca4d9e8b..f03136e482e2c 100644 --- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatTpcds.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/HdfsCompatTpcds.java @@ -28,8 +28,8 @@ @HdfsCompatCaseGroup(name = 
"TPCDS") public class HdfsCompatTpcds extends AbstractHdfsCompatCase { - private static final int fileLen = 8; - private static final Random random = new Random(); + private static final int FILE_LEN = 8; + private static final Random RANDOM = new Random(); private Path path = null; @HdfsCompatCasePrepare @@ -44,8 +44,8 @@ public void cleanup() throws IOException { @HdfsCompatCase public void open() throws IOException { - HdfsCompatUtil.createFile(fs(), path, fileLen); - byte[] data = new byte[fileLen]; + HdfsCompatUtil.createFile(fs(), path, FILE_LEN); + byte[] data = new byte[FILE_LEN]; try (FSDataInputStream in = fs().open(path)) { in.readFully(data); } @@ -53,8 +53,8 @@ public void open() throws IOException { @HdfsCompatCase public void create() throws IOException { - byte[] data = new byte[fileLen]; - random.nextBytes(data); + byte[] data = new byte[FILE_LEN]; + RANDOM.nextBytes(data); try (FSDataOutputStream out = fs().create(path, true)) { out.write(data); } @@ -67,40 +67,40 @@ public void mkdirs() throws IOException { @HdfsCompatCase public void getFileStatus() throws IOException { - HdfsCompatUtil.createFile(fs(), path, fileLen); + HdfsCompatUtil.createFile(fs(), path, FILE_LEN); FileStatus fileStatus = fs().getFileStatus(path); - Assert.assertEquals(fileLen, fileStatus.getLen()); + Assert.assertEquals(FILE_LEN, fileStatus.getLen()); } @HdfsCompatCase public void listStatus() throws IOException { - HdfsCompatUtil.createFile(fs(), new Path(path, "file"), fileLen); + HdfsCompatUtil.createFile(fs(), new Path(path, "file"), FILE_LEN); FileStatus[] files = fs().listStatus(path); Assert.assertEquals(1, files.length); - Assert.assertEquals(fileLen, files[0].getLen()); + Assert.assertEquals(FILE_LEN, files[0].getLen()); } @HdfsCompatCase public void listLocatedStatus() throws IOException { - HdfsCompatUtil.createFile(fs(), new Path(path, "file"), fileLen); + HdfsCompatUtil.createFile(fs(), new Path(path, "file"), FILE_LEN); RemoteIterator it = fs().listLocatedStatus(path); List files = new ArrayList<>(); while (it.hasNext()) { files.add(it.next()); } Assert.assertEquals(1, files.size()); - Assert.assertEquals(fileLen, files.get(0).getLen()); + Assert.assertEquals(FILE_LEN, files.get(0).getLen()); } @HdfsCompatCase public void rename() throws IOException { - HdfsCompatUtil.createFile(fs(), new Path(path, "file"), fileLen); + HdfsCompatUtil.createFile(fs(), new Path(path, "file"), FILE_LEN); fs().rename(path, new Path(path.getParent(), path.getName() + "_dst")); } @HdfsCompatCase public void delete() throws IOException { - HdfsCompatUtil.createFile(fs(), new Path(path, "file"), fileLen); + HdfsCompatUtil.createFile(fs(), new Path(path, "file"), FILE_LEN); fs().delete(path, true); } diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/package-info.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/package-info.java new file mode 100644 index 0000000000000..42810160812bc --- /dev/null +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/function/package-info.java @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This contains default functional cases for + * {@link org.apache.hadoop.fs.FileSystem} APIs. + */ +package org.apache.hadoop.fs.compat.cases.function; \ No newline at end of file diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/implement/HdfsCompatFileSystemImpl.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/implement/HdfsCompatFileSystemImpl.java index db296c4938f8a..cdd6708d59ee3 100644 --- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/implement/HdfsCompatFileSystemImpl.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/implement/HdfsCompatFileSystemImpl.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.fs.compat.cases.implement; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.compat.AbstractHdfsCompatCase; import org.apache.hadoop.fs.compat.HdfsCompatCase; diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/implement/package-info.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/implement/package-info.java new file mode 100644 index 0000000000000..4eefed28f2cdc --- /dev/null +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/implement/package-info.java @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This contains default implementation cases for + * {@link org.apache.hadoop.fs.FileSystem} APIs. + */ +package org.apache.hadoop.fs.compat.cases.implement; \ No newline at end of file diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/package-info.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/package-info.java new file mode 100644 index 0000000000000..342e6869bbfa8 --- /dev/null +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/package-info.java @@ -0,0 +1,24 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * HdfsCompatibility is a benchmark tool to quickly assess availabilities + * of Hadoop-Compatible File System APIs defined in + * {@link org.apache.hadoop.fs.FileSystem} for a specific FS implementation. + */ +package org.apache.hadoop.fs.compat; \ No newline at end of file diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/package-info.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/package-info.java new file mode 100644 index 0000000000000..85edb97a8730d --- /dev/null +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/package-info.java @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This contains default suites for + * {@link org.apache.hadoop.fs.compat.HdfsCompatibility} command. + */ +package org.apache.hadoop.fs.compat.suites; \ No newline at end of file diff --git a/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/TestHdfsCompatFsCommand.java b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/TestHdfsCompatFsCommand.java index 0107b7c9ec788..813f425592411 100644 --- a/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/TestHdfsCompatFsCommand.java +++ b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/TestHdfsCompatFsCommand.java @@ -94,7 +94,7 @@ private void cleanup(HdfsCompatCommand cmd, Configuration conf) throws Exception fs.delete(basePath, true); } - private static class TestCommand extends HdfsCompatCommand { + private static final class TestCommand extends HdfsCompatCommand { private TestCommand(String uri, String suiteName, Configuration conf) { super(uri, suiteName, conf); } diff --git a/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/TestHdfsCompatShellCommand.java b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/TestHdfsCompatShellCommand.java index c3545625bb5a5..f983a38041c07 100644 --- a/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/TestHdfsCompatShellCommand.java +++ b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/TestHdfsCompatShellCommand.java @@ -75,7 +75,7 @@ private void show(Configuration conf, HdfsCompatReport report) throws IOExceptio new HdfsCompatibility(conf).printReport(report, System.out); } - private static class TestCommand extends HdfsCompatTestCommand { + private static final class TestCommand extends HdfsCompatTestCommand { private TestCommand(String uri, Configuration conf) { super(uri, "shell", conf); } @@ -86,7 +86,7 @@ protected HdfsCompatShellScope getShellScope(HdfsCompatEnvironment env, HdfsComp } } - private static class TestSkipCommand extends HdfsCompatTestCommand { + private static final class TestSkipCommand extends HdfsCompatTestCommand { private TestSkipCommand(String uri, Configuration conf) { super(uri, "shell", conf); } @@ -98,7 +98,7 @@ protected HdfsCompatShellScope getShellScope(HdfsCompatEnvironment env, HdfsComp } private static class TestShellScope extends HdfsCompatTestShellScope { - public TestShellScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) { + private TestShellScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) { super(env, suite); } @@ -112,7 +112,7 @@ protected void replace(File scriptDir) throws IOException { } private static class TestShellScopeForSkip extends HdfsCompatTestShellScope { - public TestShellScopeForSkip(HdfsCompatEnvironment env, HdfsCompatSuite suite) { + private TestShellScopeForSkip(HdfsCompatEnvironment env, HdfsCompatSuite suite) { super(env, suite); }
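
[Editor's note, not part of the patch] The hunks above are style-driven and behavior-preserving: constants move to CONSTANT_CASE (fileLen -> FILE_LEN, random -> RANDOM), helper test classes become final, and their constructors become private. For readers new to this module, the following is a minimal sketch of how a functional compat case is written, distilled from the HdfsCompatTpcds hunks above. The class name, the prepare/openAndReadFully method names, and the path are illustrative assumptions, not code from this patch; the annotations, the fs() accessor, HdfsCompatUtil.createFile(), and the cleanup() hook are taken from the hunks as shown.

package org.apache.hadoop.fs.compat.cases.function;

import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.compat.AbstractHdfsCompatCase;
import org.apache.hadoop.fs.compat.HdfsCompatCase;
import org.apache.hadoop.fs.compat.HdfsCompatCaseCleanup;
import org.apache.hadoop.fs.compat.HdfsCompatCasePrepare;
import org.apache.hadoop.fs.compat.HdfsCompatUtil;
import org.junit.Assert;

public class HdfsCompatExample extends AbstractHdfsCompatCase {
  // Constant-case statics, matching the FILE_LEN convention introduced above.
  private static final int FILE_LEN = 8;
  private Path path = null;

  // Runs before the cases; this path choice is a hypothetical placeholder.
  @HdfsCompatCasePrepare
  public void prepare() {
    path = new Path("/tmp/hdfs-compat-example");
  }

  // One @HdfsCompatCase method per FileSystem API under test, in the same
  // create-then-verify shape as HdfsCompatTpcds#open() above.
  @HdfsCompatCase
  public void openAndReadFully() throws IOException {
    HdfsCompatUtil.createFile(fs(), path, FILE_LEN);
    byte[] data = new byte[FILE_LEN];
    try (FSDataInputStream in = fs().open(path)) {
      in.readFully(data);
    }
    Assert.assertEquals(FILE_LEN, fs().getFileStatus(path).getLen());
  }

  // Mirrors the cleanup() hook visible in the @@ -44 hunk header above.
  @HdfsCompatCaseCleanup
  public void cleanup() throws IOException {
    fs().delete(path, true);
  }
}

Keeping each probe small and independent fits the tool's stated goal of assessing per-API availability: a failing case presumably lands in the HdfsCompatReport as an unsupported API rather than aborting the whole run.
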