Commit 70cac02

fix javadoc
1 parent d3ad318 commit 70cac02

2 files changed: 34 additions & 25 deletions


hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java

Lines changed: 11 additions & 5 deletions
@@ -1338,9 +1338,9 @@ Map<String, List<RemoteLocation>> getAllLocations(String path) throws IOException
    * Get all the locations of the path for {@link RouterClientProtocol#getContentSummary(String)}.
    * For example, there are some mount points:
    * <p>
-   * /a -> ns0 -> /a
-   * /a/b -> ns0 -> /a/b
-   * /a/b/c -> ns1 -> /a/b/c
+   * /a -&gt; ns0 -&gt; /a
+   * /a/b -&gt; ns0 -&gt; /a/b
+   * /a/b/c -&gt; ns1 -&gt; /a/b/c
    * </p>
    * When the path is '/a', the result of locations should be
    * [RemoteLocation('/a', ns0, '/a'), RemoteLocation('/a/b/c', ns1, '/a/b/c')]
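The escaped arrows above render back to "->" in the generated HTML. A minimal, hypothetical illustration of the same escaping (not part of the patch; the class name is made up), since doclint can flag raw '<' and '>' inside Javadoc as malformed HTML:

/**
 * Example mount table in a Javadoc comment; the arrows are written as HTML
 * entities so the generated page still shows "/data -> ns0 -> /data".
 * <p>
 * /data -&gt; ns0 -&gt; /data
 * </p>
 */
public class JavadocArrowExample {
}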
@@ -2363,7 +2363,13 @@ private long getModifiedTime(Map<String, Long> ret, String path,
   }

   /**
-   * Get listing on remote locations.
+   * Get a partial listing of the indicated directory.
+   *
+   * @param src the directory name
+   * @param startAfter the name to start after
+   * @param needLocation if blockLocations need to be returned
+   * @return a partial listing starting after startAfter
+   * @throws IOException if other I/O error occurred
    */
   protected List<RemoteResult<RemoteLocation, DirectoryListing>> getListingInt(
       String src, byte[] startAfter, boolean needLocation) throws IOException {
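For context, a hedged sketch of how a caller typically consumes the (src, startAfter, needLocation) contract the new Javadoc describes, paging through ClientProtocol#getListing with the last returned name. The ListingPager class and printAll method are illustrative only, not part of this patch:

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

/** Hypothetical caller that pages through a directory one partial listing at a time. */
public class ListingPager {
  static void printAll(ClientProtocol namenode, String src) throws IOException {
    byte[] startAfter = HdfsFileStatus.EMPTY_NAME;
    DirectoryListing page;
    do {
      // Each call returns a partial listing starting after 'startAfter'.
      page = namenode.getListing(src, startAfter, /* needLocation */ false);
      if (page == null) {
        return; // src does not exist
      }
      for (HdfsFileStatus status : page.getPartialListing()) {
        System.out.println(status.getLocalName());
      }
      // Continue after the last entry already returned.
      startAfter = page.getLastName();
    } while (page.hasMore());
  }
}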
@@ -2402,7 +2408,7 @@ protected List<RemoteResult<RemoteLocation, DirectoryListing>> getListingInt(
    * @param startAfter starting listing from client, used to define listing
    *                   start boundary
    * @param remainingEntries how many entries left from subcluster
-   * @return
+   * @return true if should add mount point, otherwise false;
    */
   protected static boolean shouldAddMountPoint(
       byte[] mountPoint, byte[] lastEntry, byte[] startAfter,

hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/async/RouterAsyncClientProtocol.java

Lines changed: 23 additions & 20 deletions
@@ -122,6 +122,12 @@ public class RouterAsyncClientProtocol extends RouterClientProtocol {
   private String superUser;
   /** Identifier for the super group. */
   private final String superGroup;
+  /**
+   * Caching server defaults so as to prevent redundant calls to namenode,
+   * similar to DFSClient, caching saves efforts when router connects
+   * to multiple clients.
+   */
+  private volatile FsServerDefaults serverDefaults;
   private final RouterSnapshot asyncSnapshotProto;
   private final ErasureCoding asyncErasureCoding;
   private final RouterCacheAdmin routerAsyncCacheAdmin;
@@ -147,17 +153,17 @@ public RouterAsyncClientProtocol(Configuration conf, RouterRpcServer rpcServer)
   public FsServerDefaults getServerDefaults() throws IOException {
     rpcServer.checkOperation(NameNode.OperationCategory.READ);
     long now = Time.monotonicNow();
-    if ((getServerDefaults() == null) || (now - getServerDefaultsLastUpdate()
+    if ((serverDefaults == null) || (now - getServerDefaultsLastUpdate()
         > getServerDefaultsValidityPeriod())) {
       RemoteMethod method = new RemoteMethod("getServerDefaults");
       rpcServer.invokeAtAvailableNsAsync(method, FsServerDefaults.class);
       asyncApply(o -> {
-        setServerDefaults((FsServerDefaults) o);
+        serverDefaults = (FsServerDefaults) o;
         setServerDefaultsLastUpdate(now);
-        return getServerDefaults();
+        return serverDefaults;
       });
     } else {
-      asyncComplete(getServerDefaults());
+      asyncComplete(serverDefaults);
     }
     return asyncReturn(FsServerDefaults.class);
   }
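The old branch consulted getServerDefaults() from inside itself, re-entering the override instead of reading a cached value; the fix reads the new volatile field directly. A minimal, synchronous sketch of the same cache-then-refresh idea (ServerDefaultsCache is illustrative only, not router code; the real method stays asynchronous via asyncApply/asyncComplete):

import java.util.function.Supplier;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.util.Time;

/** Hypothetical cache: a volatile value plus a monotonic timestamp and validity period. */
public class ServerDefaultsCache {
  private final long validityPeriodMs;
  private volatile FsServerDefaults cached;   // same role as the new serverDefaults field
  private volatile long lastUpdateMs;

  public ServerDefaultsCache(long validityPeriodMs) {
    this.validityPeriodMs = validityPeriodMs;
  }

  /** Returns the cached defaults, refreshing through the fetcher only when stale. */
  public FsServerDefaults get(Supplier<FsServerDefaults> fetch) {
    long now = Time.monotonicNow();
    if (cached == null || now - lastUpdateMs > validityPeriodMs) {
      cached = fetch.get();   // in the router this is the async call to a namenode
      lastUpdateMs = now;
    }
    return cached;
  }
}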
@@ -778,22 +784,19 @@ public HdfsFileStatus getFileInfo(String src, boolean withMountTable) throws IOException
         return;
       }
       // If there is no real path, check mount points
-      if (ret == null) {
-        List<String> children = subclusterResolver.getMountPoints(src);
-        if (children != null && !children.isEmpty()) {
-          Map<String, Long> dates = getMountPointDates(src);
-          long date = 0;
-          if (dates != null && dates.containsKey(src)) {
-            date = dates.get(src);
-          }
-          getMountPointStatus(src, children.size(), date, false);
-        } else if (children != null) {
-          // The src is a mount point, but there are no files or directories
-          getMountPointStatus(src, 0, 0, false);
-        } else {
-          asyncComplete(null);
-          return;
+      List<String> children = subclusterResolver.getMountPoints(src);
+      if (children != null && !children.isEmpty()) {
+        Map<String, Long> dates = getMountPointDates(src);
+        long date = 0;
+        if (dates != null && dates.containsKey(src)) {
+          date = dates.get(src);
         }
+        getMountPointStatus(src, children.size(), date, false);
+      } else if (children != null) {
+        // The src is a mount point, but there are no files or directories
+        getMountPointStatus(src, 0, 0, false);
+      } else {
+        asyncComplete(null);
       }
     });

@@ -803,7 +806,7 @@ public HdfsFileStatus getFileInfo(String src, boolean withMountTable) throws IOException
     }
     // Can't find mount point for path and the path didn't contain any sub monit points,
     // throw the NoLocationException to client.
-    if (ret == null && noLocationException[0] != null) {
+    if (noLocationException[0] != null) {
       throw noLocationException[0];
     }
     return null;
