
Commit 9965c28

Merge branch 'apache:trunk' into HADOOP-19136
2 parents: 6c7dae2 + ecf665c

51 files changed: +2320 -2872 lines


BUILDING.txt

Lines changed: 1 addition & 8 deletions

@@ -163,14 +163,7 @@ Maven build goals:
 YARN Application Timeline Service V2 build options:
 
 YARN Timeline Service v.2 chooses Apache HBase as the primary backing storage. The supported
-versions of Apache HBase are 1.7.1 (default) and 2.2.4.
-
-* HBase 1.7.1 is used by default to build Hadoop. The official releases are ready to use if you
-  plan on running Timeline Service v2 with HBase 1.7.1.
-
-* Use -Dhbase.profile=2.0 to build Hadoop with HBase 2.2.4. Provide this option if you plan
-  on running Timeline Service v2 with HBase 2.x.
-
+version of Apache HBase is 2.5.8.
 
 Snappy build options:

LICENSE-binary

Lines changed: 4 additions & 4 deletions

@@ -310,10 +310,10 @@ org.apache.commons:commons-validator:1.6
 org.apache.curator:curator-client:5.2.0
 org.apache.curator:curator-framework:5.2.0
 org.apache.curator:curator-recipes:5.2.0
-org.apache.hbase:hbase-annotations:1.7.1
-org.apache.hbase:hbase-client:1.7.1
-org.apache.hbase:hbase-common:1.7.1
-org.apache.hbase:hbase-protocol:1.7.1
+org.apache.hbase:hbase-annotations:2.5.8
+org.apache.hbase:hbase-client:2.5.8
+org.apache.hbase:hbase-common:2.5.8
+org.apache.hbase:hbase-protocol:2.5.8
 org.apache.htrace:htrace-core:3.1.0-incubating
 org.apache.htrace:htrace-core4:4.1.0-incubating
 org.apache.httpcomponents:httpclient:4.5.13

hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml

Lines changed: 1 addition & 1 deletion

@@ -245,7 +245,7 @@
     </moduleSet>
     <moduleSet>
       <includes>
-        <include>org.apache.hadoop:${hbase-server-artifactid}</include>
+        <include>org.apache.hadoop:hadoop-yarn-server-timelineservice-hbase-server-2</include>
       </includes>
       <binaries>
         <outputDirectory>share/hadoop/${hadoop.component}/timelineservice</outputDirectory>

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/FutureIO.java

Lines changed: 79 additions & 0 deletions

@@ -21,6 +21,10 @@
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.io.UncheckedIOException;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CompletionException;
@@ -34,6 +38,9 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSBuilder;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * Future IO Helper methods.
  * <p>
@@ -55,6 +62,7 @@
 @InterfaceStability.Unstable
 public final class FutureIO {
 
+  private static final Logger LOG = LoggerFactory.getLogger(FutureIO.class.getName());
   private FutureIO() {
   }
 
@@ -114,6 +122,77 @@ public static <T> T awaitFuture(final Future<T> future,
     }
   }
 
+  /**
+   * Evaluates a collection of futures and returns their results as a list.
+   * <p>
+   * This method blocks until all futures in the collection have completed.
+   * If any future throws an exception during its execution, this method
+   * extracts and rethrows that exception.
+   * </p>
+   *
+   * @param collection collection of futures to be evaluated
+   * @param <T> type of the result.
+   * @return the list of future's result, if all went well.
+   * @throws InterruptedIOException future was interrupted
+   * @throws IOException if something went wrong
+   * @throws RuntimeException any nested RTE thrown
+   */
+  public static <T> List<T> awaitAllFutures(final Collection<Future<T>> collection)
+      throws InterruptedIOException, IOException, RuntimeException {
+    List<T> results = new ArrayList<>();
+    try {
+      for (Future<T> future : collection) {
+        results.add(future.get());
+      }
+      return results;
+    } catch (InterruptedException e) {
+      LOG.debug("Execution of future interrupted ", e);
+      throw (InterruptedIOException) new InterruptedIOException(e.toString())
+          .initCause(e);
+    } catch (ExecutionException e) {
+      LOG.debug("Execution of future failed with exception", e.getCause());
+      return raiseInnerCause(e);
+    }
+  }
+
+  /**
+   * Evaluates a collection of futures and returns their results as a list,
+   * but only waits up to the specified timeout for each future to complete.
+   * <p>
+   * This method blocks until all futures in the collection have completed or
+   * the timeout expires, whichever happens first. If any future throws an
+   * exception during its execution, this method extracts and rethrows that exception.
+   * </p>
+   *
+   * @param collection collection of futures to be evaluated
+   * @param duration timeout duration
+   * @param <T> type of the result.
+   * @return the list of future's result, if all went well.
+   * @throws InterruptedIOException future was interrupted
+   * @throws IOException if something went wrong
+   * @throws RuntimeException any nested RTE thrown
+   * @throws TimeoutException the future timed out.
+   */
+  public static <T> List<T> awaitAllFutures(final Collection<Future<T>> collection,
+      final Duration duration)
+      throws InterruptedIOException, IOException, RuntimeException,
+      TimeoutException {
+    List<T> results = new ArrayList<>();
+    try {
+      for (Future<T> future : collection) {
+        results.add(future.get(duration.toMillis(), TimeUnit.MILLISECONDS));
+      }
+      return results;
+    } catch (InterruptedException e) {
+      LOG.debug("Execution of future interrupted ", e);
+      throw (InterruptedIOException) new InterruptedIOException(e.toString())
+          .initCause(e);
+    } catch (ExecutionException e) {
+      LOG.debug("Execution of future failed with exception", e.getCause());
+      return raiseInnerCause(e);
+    }
+  }
+
   /**
    * From the inner cause of an execution exception, extract the inner cause
    * if it is an IOE or RTE.
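
A minimal usage sketch of the new helpers; the executor, the task bodies, and the class name are illustrative only, not part of the commit:

    import java.time.Duration;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    import org.apache.hadoop.util.functional.FutureIO;

    public class AwaitAllExample {
      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        try {
          List<Future<Integer>> futures = new ArrayList<>();
          for (int i = 0; i < 8; i++) {
            final int n = i;
            futures.add(pool.submit(() -> n * n)); // illustrative task
          }
          // Blocks until every future completes; an IOException or RTE thrown
          // by a task is extracted from the ExecutionException and rethrown.
          List<Integer> results = FutureIO.awaitAllFutures(futures);
          System.out.println(results);

          // Timed variant: each future gets up to the given duration,
          // otherwise a TimeoutException is raised.
          List<Integer> timed =
              FutureIO.awaitAllFutures(futures, Duration.ofSeconds(30));
          System.out.println(timed);
        } finally {
          pool.shutdown();
        }
      }
    }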

hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java

Lines changed: 1 addition & 7 deletions

@@ -1089,13 +1089,7 @@ public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
   DatanodeInfo[] getCachedDatanodeReport(DatanodeReportType type)
       throws IOException {
     try {
-      DatanodeInfo[] dns = this.dnCache.get(type);
-      if (dns == null) {
-        LOG.debug("Get null DN report from cache");
-        dns = getCachedDatanodeReportImpl(type);
-        this.dnCache.put(type, dns);
-      }
-      return dns;
+      return this.dnCache.get(type);
     } catch (ExecutionException e) {
       LOG.error("Cannot get the DN report for {}", type, e);
       Throwable cause = e.getCause();
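
The retained catch of ExecutionException suggests dnCache is a loading cache that runs its loader on a miss, which is what makes the deleted null check and manual put redundant. A minimal sketch of that pattern with Guava's LoadingCache — an assumption; the names below are illustrative, not the Hadoop source:

    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.TimeUnit;

    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;

    public class LoadingCacheSketch {
      // The loader runs on every cache miss, so get() never returns null.
      private final LoadingCache<String, String> cache = CacheBuilder.newBuilder()
          .expireAfterWrite(10, TimeUnit.SECONDS)
          .build(new CacheLoader<String, String>() {
            @Override
            public String load(String key) {
              return expensiveLookup(key); // hypothetical backing call
            }
          });

      String get(String key) throws ExecutionException {
        // A miss triggers load(); a loader failure surfaces as an
        // ExecutionException, matching the catch block kept above.
        return cache.get(key);
      }

      private static String expensiveLookup(String key) {
        return "value-for-" + key;
      }
    }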

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

Lines changed: 1 addition & 0 deletions

@@ -993,6 +993,7 @@ public boolean complete(String src, String clientName,
   public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
     checkNNStartup();
     namesystem.reportBadBlocks(blocks);
+    namesystem.logAuditEvent(true, "reportBadBlocks", null);
   }
 
   @Override // ClientProtocol

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java

Lines changed: 20 additions & 0 deletions

@@ -33,6 +33,7 @@
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.ipc.RPC;
@@ -1218,6 +1219,25 @@ public void testDeleteRoot() throws Exception {
     verifyAuditLogs(aceDeletePattern);
   }
 
+  @Test
+  public void testReportBadBlocks() throws IOException {
+    String auditLogString =
+        ".*allowed=true.*cmd=reportBadBlocks.*";
+    FSNamesystem fsNamesystem = spy(cluster.getNamesystem());
+    when(fsNamesystem.isExternalInvocation()).thenReturn(true);
+    Server.Call call = spy(new Server.Call(
+        1, 1, null, null, RPC.RpcKind.RPC_BUILTIN, new byte[] {1, 2, 3}));
+    when(call.getRemoteUser()).thenReturn(
+        UserGroupInformation.createRemoteUser(System.getProperty("user.name")));
+    Server.getCurCall().set(call);
+    try {
+      cluster.getNameNodeRpc().reportBadBlocks(new LocatedBlock[]{});
+      verifyAuditLogs(auditLogString);
+    } catch (Exception e) {
+      fail(" The operation threw an exception" + e);
+    }
+  }
+
   private void verifyAuditRestoreFailedStorageACE(
       FSNamesystem fsNamesystem, String arg) throws IOException {
     String operationName = fsNamesystem.getFailedStorageCommand(arg);

hadoop-project/pom.xml

Lines changed: 19 additions & 40 deletions

@@ -203,8 +203,7 @@
     <swagger-annotations-version>1.5.4</swagger-annotations-version>
     <snakeyaml.version>2.0</snakeyaml.version>
     <sshd.version>2.11.0</sshd.version>
-    <hbase.one.version>1.7.1</hbase.one.version>
-    <hbase.two.version>2.2.4</hbase.two.version>
+    <hbase.version>2.5.8-hadoop3</hbase.version>
     <junit.version>4.13.2</junit.version>
     <junit.jupiter.version>5.8.2</junit.jupiter.version>
     <junit.vintage.version>5.8.2</junit.vintage.version>
@@ -501,6 +500,11 @@
         <version>${hadoop.version}</version>
         <type>test-jar</type>
       </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-yarn-server-timelineservice-hbase-server-2</artifactId>
+        <version>${hadoop.version}</version>
+      </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
@@ -1811,6 +1815,10 @@
           <groupId>org.apache.yetus</groupId>
           <artifactId>audience-annotations</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>org.osgi</groupId>
+          <artifactId>org.osgi.core</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
     <dependency>
@@ -1875,6 +1883,14 @@
           <groupId>org.apache.yetus</groupId>
           <artifactId>audience-annotations</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>com.google.errorprone</groupId>
+          <artifactId>error_prone_annotations</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.checkerframework</groupId>
+          <artifactId>checker-qual</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
     <dependency>
@@ -2677,33 +2693,6 @@
         </plugins>
       </build>
     </profile>
-    <!-- The profile for building against HBase 1.2.x
-      This is the default.
-    -->
-    <profile>
-      <id>hbase1</id>
-      <activation>
-        <property>
-          <name>!hbase.profile</name>
-        </property>
-      </activation>
-      <properties>
-        <hbase.version>${hbase.one.version}</hbase.version>
-        <hbase-compatible-hadoop.version>2.8.5</hbase-compatible-hadoop.version>
-        <hbase-compatible-guava.version>12.0.1</hbase-compatible-guava.version>
-        <hbase-compatible-guice.version>4.0</hbase-compatible-guice.version>
-        <hbase-server-artifactid>hadoop-yarn-server-timelineservice-hbase-server-1</hbase-server-artifactid>
-      </properties>
-      <dependencyManagement>
-        <dependencies>
-          <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>${hbase-server-artifactid}</artifactId>
-            <version>${hadoop.version}</version>
-          </dependency>
-        </dependencies>
-      </dependencyManagement>
-    </profile>
     <!-- The profile for building against HBase 2.0.0.
       Activate using: mvn -Dhbase.profile=2.0
     -->
@@ -2716,20 +2705,10 @@
         </property>
       </activation>
       <properties>
-        <hbase.version>${hbase.two.version}</hbase.version>
-        <hbase-compatible-hadoop.version>2.8.5</hbase-compatible-hadoop.version>
-        <hbase-compatible-guava.version>11.0.2</hbase-compatible-guava.version>
-        <hbase-server-artifactid>hadoop-yarn-server-timelineservice-hbase-server-2</hbase-server-artifactid>
-        <hbase-compatible-guice.version>4.0</hbase-compatible-guice.version>
-        <hbase-compatible-jetty.version>9.3.27.v20190418</hbase-compatible-jetty.version>
       </properties>
       <dependencyManagement>
         <dependencies>
-          <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>${hbase-server-artifactid}</artifactId>
-            <version>${hadoop.version}</version>
-          </dependency>
+
         </dependencies>
       </dependencyManagement>
     </profile>

hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java

Lines changed: 0 additions & 5 deletions

@@ -1230,11 +1230,6 @@ public void setOptimizeFooterRead(boolean optimizeFooterRead) {
     this.optimizeFooterRead = optimizeFooterRead;
   }
 
-  @VisibleForTesting
-  public void setFooterReadBufferSize(int footerReadBufferSize) {
-    this.footerReadBufferSize = footerReadBufferSize;
-  }
-
   @VisibleForTesting
   public void setEnableAbfsListIterator(boolean enableAbfsListIterator) {
     this.enableAbfsListIterator = enableAbfsListIterator;
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -279,7 +279,7 @@ private FSDataInputStream open(final Path path,
279279
try {
280280
TracingContext tracingContext = new TracingContext(clientCorrelationId,
281281
fileSystemId, FSOperationType.OPEN, tracingHeaderFormat, listener);
282-
InputStream inputStream = abfsStore
282+
InputStream inputStream = getAbfsStore()
283283
.openFileForRead(qualifiedPath, parameters, statistics, tracingContext);
284284
return new FSDataInputStream(inputStream);
285285
} catch (AzureBlobFileSystemException ex) {
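
Reading the store through getAbfsStore() rather than the field lets a test substitute the store by stubbing the getter. A hedged sketch of how that might look with Mockito — an assumption about intent, not taken from the commit, and it presumes the getter is visible to test code:

    // realFs is an already-initialized AzureBlobFileSystem from the test setup.
    AzureBlobFileSystem fs = Mockito.spy(realFs);
    AzureBlobFileSystemStore mockStore =
        Mockito.mock(AzureBlobFileSystemStore.class);
    // Direct field access would bypass this stub; via the getter it takes effect.
    Mockito.doReturn(mockStore).when(fs).getAbfsStore();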
