Commit 4fbf034

Merge branch 'apache:trunk' into HADOOP-19136
2 parents: 9965c28 + 2645898

45 files changed: +1609, -377 lines


hadoop-common-project/hadoop-auth/pom.xml

Lines changed: 5 additions & 1 deletion

```diff
@@ -136,7 +136,11 @@
     </dependency>
     <dependency>
       <groupId>org.apache.kerby</groupId>
-      <artifactId>kerb-simplekdc</artifactId>
+      <artifactId>kerb-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.kerby</groupId>
+      <artifactId>kerb-util</artifactId>
     </dependency>
     <dependency>
       <groupId>org.apache.directory.server</groupId>
```

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslConstants.java

Lines changed: 45 additions & 0 deletions (new file)

```java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.security;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * SASL related constants.
 */
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class SaslConstants {
  public static final Logger LOG = LoggerFactory.getLogger(SaslConstants.class);

  private static final String SASL_MECHANISM_ENV = "HADOOP_SASL_MECHANISM";
  public static final String SASL_MECHANISM;
  private static final String SASL_MECHANISM_DEFAULT = "DIGEST-MD5";

  static {
    final String mechanism = System.getenv(SASL_MECHANISM_ENV);
    LOG.debug("{} = {} (env)", SASL_MECHANISM_ENV, mechanism);
    SASL_MECHANISM = mechanism != null ? mechanism : SASL_MECHANISM_DEFAULT;
    LOG.debug("{} = {} (effective)", SASL_MECHANISM_ENV, SASL_MECHANISM);
  }

  private SaslConstants() {}
}
```
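
For orientation (this example is not part of the commit), the constant simply drops into the standard `javax.security.sasl` factory calls where `"DIGEST-MD5"` used to be hard-coded. The protocol, server name, and no-op callback handler below are placeholders:

```java
import java.util.Collections;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslClient;
import javax.security.sasl.SaslException;

import org.apache.hadoop.security.SaslConstants;

public class SaslMechanismProbe {
  public static void main(String[] args) throws SaslException {
    // Ask the JDK for a client implementing the configured mechanism
    // (DIGEST-MD5 unless HADOOP_SASL_MECHANISM overrides it).
    SaslClient client = Sasl.createSaslClient(
        new String[] {SaslConstants.SASL_MECHANISM},
        null,                    // authorizationId
        "hdfs",                  // protocol (placeholder)
        "0",                     // serverName (placeholder)
        Collections.emptyMap(),  // properties
        callbacks -> { });       // no-op handler; real callers supply tokens
    System.out.println(client == null
        ? "no provider for " + SaslConstants.SASL_MECHANISM
        : "negotiating with " + client.getMechanismName());
  }
}
```

Note that `Sasl.createSaslClient` returns null when no registered security provider supports the requested mechanism, which is the failure mode to expect if the environment variable names a mechanism the JVM does not ship.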

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java

Lines changed: 7 additions & 11 deletions

```diff
@@ -223,8 +223,8 @@ public enum AuthMethod {
     SIMPLE((byte) 80, ""),
     KERBEROS((byte) 81, "GSSAPI"),
     @Deprecated
-    DIGEST((byte) 82, "DIGEST-MD5"),
-    TOKEN((byte) 82, "DIGEST-MD5"),
+    DIGEST((byte) 82, SaslConstants.SASL_MECHANISM),
+    TOKEN((byte) 82, SaslConstants.SASL_MECHANISM),
     PLAIN((byte) 83, "PLAIN");

     /** The code for this method. */
@@ -273,7 +273,7 @@ public void write(DataOutput out) throws IOException {
     }
   };

-  /** CallbackHandler for SASL DIGEST-MD5 mechanism */
+  /** CallbackHandler for SASL mechanism. */
  @InterfaceStability.Evolving
  public static class SaslDigestCallbackHandler implements CallbackHandler {
    private SecretManager<TokenIdentifier> secretManager;
@@ -309,7 +309,7 @@ public void handle(Callback[] callbacks) throws InvalidToken,
          continue; // realm is ignored
        } else {
          throw new UnsupportedCallbackException(callback,
-              "Unrecognized SASL DIGEST-MD5 Callback");
+              "Unrecognized SASL Callback");
        }
      }
      if (pc != null) {
@@ -319,11 +319,8 @@ public void handle(Callback[] callbacks) throws InvalidToken,
        UserGroupInformation user = null;
        user = tokenIdentifier.getUser(); // may throw exception
        connection.attemptingUser = user;
-
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("SASL server DIGEST-MD5 callback: setting password "
-              + "for client: " + tokenIdentifier.getUser());
-        }
+
+        LOG.debug("SASL server callback: setting password for client: {}", user);
        pc.setPassword(password);
      }
      if (ac != null) {
@@ -339,8 +336,7 @@ public void handle(Callback[] callbacks) throws InvalidToken,
          UserGroupInformation logUser =
              getIdentifier(authzid, secretManager).getUser();
          String username = logUser == null ? null : logUser.getUserName();
-          LOG.debug("SASL server DIGEST-MD5 callback: setting "
-              + "canonicalized client ID: " + username);
+          LOG.debug("SASL server callback: setting authorizedID: {}", username);
        }
        ac.setAuthorizedID(authzid);
      }
```

hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md

Lines changed: 9 additions & 0 deletions

```diff
@@ -326,6 +326,15 @@ Each metrics record contains tags such as HAState and Hostname as additional inf
 | `FSN(Read/Write)LockOverallNanosAvgTime` | Average time of holding the lock by all operations in nanoseconds |
 | `PendingSPSPaths` | The number of paths to be processed by storage policy satisfier |

+BlockManager
+-------------
+
+The metrics present statistics from the BlockManager's perspective.
+
+| Name | Description |
+|:---- |:---- |
+| `StorageTypeStats` | key represents different StorageTypes, and value represents the detailed storage information corresponding to each StorageType. |
+
 JournalNode
 -----------
```
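
For readers who want to see these values live, NameNode metrics can be queried over HTTP through the `/jmx` servlet. The sketch below is illustrative only: the host/port and the `BlockStats` bean name are assumptions and should be verified against your own NameNode's `/jmx` output.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class StorageTypeStatsProbe {
  public static void main(String[] args) throws Exception {
    // Hypothetical NameNode address; the qry parameter filters to one bean.
    String url = "http://namenode.example.com:9870/jmx"
        + "?qry=Hadoop:service=NameNode,name=BlockStats";
    HttpResponse<String> response = HttpClient.newHttpClient().send(
        HttpRequest.newBuilder(URI.create(url)).GET().build(),
        HttpResponse.BodyHandlers.ofString());
    // The JSON reply should contain a StorageTypeStats entry keyed by storage
    // type (DISK, SSD, ARCHIVE, ...) with per-type capacity and node counts.
    System.out.println(response.body());
  }
}
```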

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java

Lines changed: 1 addition & 1 deletion

```diff
@@ -536,7 +536,7 @@ public void handle(Callback[] callbacks)
   private static Pattern BadToken =
       Pattern.compile("^" + RemoteException.class.getName() +
           "\\("+ SaslException.class.getName() + "\\): " +
-          "DIGEST-MD5: digest response format violation.*");
+          SaslConstants.SASL_MECHANISM + ": digest response format violation.*");
   private static Pattern KrbFailed =
       Pattern.compile(".*Failed on local exception:.* " +
           "Failed to specify server's Kerberos principal name.*");
```

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/metrics/BlockReaderIoProvider.java

Lines changed: 1 addition & 1 deletion

```diff
@@ -65,7 +65,7 @@ public BlockReaderIoProvider(@Nullable ShortCircuitConf conf,
   public int read(FileChannel dataIn, ByteBuffer dst, long position)
       throws IOException{
     final int nRead;
-    if (isEnabled && (ThreadLocalRandom.current().nextInt() < sampleRangeMax)) {
+    if (isEnabled && (ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE) < sampleRangeMax)) {
       long begin = timer.monotonicNow();
       nRead = dataIn.read(dst, position);
       long latency = timer.monotonicNow() - begin;
```
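
This one-line fix is worth unpacking: `nextInt()` with no bound draws from the full signed 32-bit range, so roughly half of all draws are negative and `nextInt() < sampleRangeMax` passes far more often than the configured sampling fraction intends. Bounding the draw to `[0, Integer.MAX_VALUE)` makes the comparison behave as a true rate. A small standalone demonstration (not Hadoop code):

```java
import java.util.concurrent.ThreadLocalRandom;

public class SamplingBiasDemo {
  public static void main(String[] args) {
    final int sampleRangeMax = (int) (0.01 * Integer.MAX_VALUE); // ~1% target
    final int trials = 1_000_000;
    int unbounded = 0;
    int bounded = 0;
    for (int i = 0; i < trials; i++) {
      if (ThreadLocalRandom.current().nextInt() < sampleRangeMax) {
        unbounded++; // old behavior: every negative draw also counts
      }
      if (ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE) < sampleRangeMax) {
        bounded++;   // fixed behavior: close to the intended 1% rate
      }
    }
    System.out.printf("unbounded: %.2f%%, bounded: %.2f%%%n",
        100.0 * unbounded / trials, 100.0 * bounded / trials);
  }
}
```

Running this prints roughly 50% for the unbounded draw versus roughly 1% for the bounded one, which is the bias the commit removes from short-circuit read sampling.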

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java

Lines changed: 4 additions & 3 deletions

```diff
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.security.FastSaslClientFactory;
 import org.apache.hadoop.security.FastSaslServerFactory;
+import org.apache.hadoop.security.SaslConstants;
 import org.apache.hadoop.security.SaslInputStream;
 import org.apache.hadoop.security.SaslOutputStream;

@@ -50,7 +51,7 @@ class SaslParticipant {
   // a short string.
   private static final String SERVER_NAME = "0";
   private static final String PROTOCOL = "hdfs";
-  private static final String MECHANISM = "DIGEST-MD5";
+  private static final String[] MECHANISM_ARRAY = {SaslConstants.SASL_MECHANISM};

   // One of these will always be null.
   private final SaslServer saslServer;
@@ -81,7 +82,7 @@ public static SaslParticipant createServerSaslParticipant(
       Map<String, String> saslProps, CallbackHandler callbackHandler)
       throws SaslException {
     initializeSaslServerFactory();
-    return new SaslParticipant(saslServerFactory.createSaslServer(MECHANISM,
+    return new SaslParticipant(saslServerFactory.createSaslServer(MECHANISM_ARRAY[0],
         PROTOCOL, SERVER_NAME, saslProps, callbackHandler));
   }

@@ -99,7 +100,7 @@ public static SaslParticipant createClientSaslParticipant(String userName,
       throws SaslException {
     initializeSaslClientFactory();
     return new SaslParticipant(
-        saslClientFactory.createSaslClient(new String[] {MECHANISM}, userName,
+        saslClientFactory.createSaslClient(MECHANISM_ARRAY, userName,
             PROTOCOL, SERVER_NAME, saslProps, callbackHandler));
   }
```

hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java

Lines changed: 1 addition & 1 deletion

```diff
@@ -189,7 +189,7 @@ public void overrideExpiredRecords(QueryResult<R> query) throws IOException {
           LOG.warn("Couldn't delete State Store record {}: {}", recordName,
               record);
         }
-      } else if (record.checkExpired(currentDriverTime)) {
+      } else if (!record.isExpired() && record.checkExpired(currentDriverTime)) {
         String recordName = StateStoreUtils.getRecordName(record.getClass());
         LOG.info("Override State Store record {}: {}", recordName, record);
         commitRecords.add(record);
```
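
The added `!record.isExpired()` guard appears to close a time-of-check/time-of-use window: `overrideExpiredRecords` runs over a snapshot held by the cache-refresh thread, so without the guard a record that was already expired in that snapshot could be committed back over an ACTIVE registration written by a concurrent heartbeat. The new `testRegistrationExpiredRaceCondition` test below pins the refresh thread at exactly that point and verifies the active record survives.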

hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java

Lines changed: 101 additions & 0 deletions

```diff
@@ -29,11 +29,18 @@
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.spy;

 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;

@@ -49,16 +56,22 @@
 import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateNamenodeRegistrationRequest;
 import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.util.Time;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 /**
  * Test the basic {@link MembershipStore} membership functionality.
  */
 public class TestStateStoreMembershipState extends TestStateStoreBase {

+  private static Logger LOG = LoggerFactory.getLogger(
+      TestStateStoreMembershipState.class);
+
   private static MembershipStore membershipStore;

   @BeforeClass
@@ -529,6 +542,94 @@ public void testRegistrationExpiredAndDeletion()
     }, 100, 3000);
   }

+  @Test
+  public void testRegistrationExpiredRaceCondition()
+      throws InterruptedException, IOException, TimeoutException, ExecutionException {
+
+    // Populate the state store with a single NN element
+    // 1) ns0:nn0 - Expired
+    // Create a thread to refresh the cached records, pulling the expired record
+    // into the thread's memory
+    // Then insert an active record, and confirm that the refresh thread does not
+    // override the active record with the expired record it has in memory
+
+    MembershipState.setDeletionMs(-1);
+
+    MembershipState expiredReport = createRegistration(
+        NAMESERVICES[0], NAMENODES[0], ROUTERS[0],
+        FederationNamenodeServiceState.ACTIVE);
+    expiredReport.setDateModified(Time.monotonicNow() - 5000);
+    expiredReport.setState(FederationNamenodeServiceState.EXPIRED);
+    assertTrue(namenodeHeartbeat(expiredReport));
+
+    // Load cache
+    MembershipStore memStoreSpy = spy(membershipStore);
+    DelayAnswer delayer = new DelayAnswer(LOG);
+    doAnswer(delayer).when(memStoreSpy).overrideExpiredRecords(any());
+
+    ExecutorService pool = Executors.newFixedThreadPool(1);
+
+    Future<Boolean> cacheRefreshFuture = pool.submit(() -> {
+      try {
+        return memStoreSpy.loadCache(true);
+      } catch (IOException e) {
+        LOG.error("Exception while loading cache:", e);
+      }
+      return false;
+    });
+
+    // Verify quorum and entry
+    MembershipState quorumEntry = getNamenodeRegistration(
+        expiredReport.getNameserviceId(), expiredReport.getNamenodeId());
+    assertNull(quorumEntry);
+
+    MembershipState record = membershipStore.getDriver()
+        .get(MembershipState.class).getRecords().get(0);
+    assertNotNull(record);
+    assertEquals(ROUTERS[0], record.getRouterId());
+    assertEquals(FederationNamenodeServiceState.EXPIRED,
+        record.getState());
+
+    // Insert active while the other thread is refreshing its cache
+    MembershipState activeReport = createRegistration(
+        NAMESERVICES[0], NAMENODES[0], ROUTERS[0],
+        FederationNamenodeServiceState.ACTIVE);
+
+    delayer.waitForCall();
+    assertTrue(namenodeHeartbeat(activeReport));
+
+    record = membershipStore.getDriver()
+        .get(MembershipState.class).getRecords().get(0);
+    assertNotNull(record);
+    assertEquals(ROUTERS[0], record.getRouterId());
+    assertEquals(FederationNamenodeServiceState.ACTIVE,
+        record.getState());
+
+    quorumEntry = getExpiredNamenodeRegistration(
+        expiredReport.getNameserviceId(), expiredReport.getNamenodeId());
+    assertNull(quorumEntry);
+
+    // Allow the thread to finish refreshing the cache
+    delayer.proceed();
+    assertTrue(cacheRefreshFuture.get(5, TimeUnit.SECONDS));
+
+    // The state store should still be the active report
+    record = membershipStore.getDriver()
+        .get(MembershipState.class).getRecords().get(0);
+    assertNotNull(record);
+    assertEquals(ROUTERS[0], record.getRouterId());
+    assertEquals(FederationNamenodeServiceState.ACTIVE,
+        record.getState());
+
+    membershipStore.loadCache(true);
+
+    quorumEntry = getExpiredNamenodeRegistration(
+        expiredReport.getNameserviceId(),
+        expiredReport.getNamenodeId());
+    assertNull(quorumEntry);
+  }
+
   @Test
   public void testNamespaceInfoWithUnavailableNameNodeRegistration()
       throws IOException {
```
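
`GenericTestUtils.DelayAnswer` is what lets the test freeze the spied `overrideExpiredRecords` call at the critical moment. A stripped-down sketch of the mechanism, assuming Mockito (illustrative only, not Hadoop's actual implementation):

```java
import java.util.concurrent.CountDownLatch;

import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

class MiniDelayAnswer implements Answer<Object> {
  private final CountDownLatch called = new CountDownLatch(1);  // method entered
  private final CountDownLatch allowed = new CountDownLatch(1); // may proceed

  @Override
  public Object answer(InvocationOnMock invocation) throws Throwable {
    called.countDown();                 // tell the test the call has started
    allowed.await();                    // park until the test calls proceed()
    return invocation.callRealMethod(); // then run the real method
  }

  void waitForCall() throws InterruptedException {
    called.await(); // test side: block until the spied method is entered
  }

  void proceed() {
    allowed.countDown(); // test side: release the suspended call
  }
}
```

In the test above, the heartbeat that writes the ACTIVE record happens between `waitForCall()` and `proceed()`, which is precisely the window the new `CachedRecordStore` guard has to survive.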

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java

Lines changed: 1 addition & 1 deletion

```diff
@@ -241,7 +241,7 @@ public void handle(Callback[] callbacks) throws IOException,
           continue; // realm is ignored
         } else {
           throw new UnsupportedCallbackException(callback,
-              "Unrecognized SASL DIGEST-MD5 Callback: " + callback);
+              "Unrecognized SASL Callback: " + callback);
         }
       }
```